// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/cleanup.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
        bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
        const struct wmi_stats_event *ev;
        struct ath12k_fw_stats *stats;
        const struct wmi_per_chain_rssi_stat_params *rssi;
        int rssi_num;
        bool chain_rssi_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
        struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
        u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
        u32 default_conc_scan_config_bits;
        u32 default_fw_config_bits;
        struct ath12k_wmi_ppe_threshold_arg ppet;
        u32 he_cap_info;
        u32 mpdu_density;
        u32 max_bssid_rx_filters;
        u32 num_hw_modes;
        u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
        struct ath12k_wmi_service_ext_arg arg;
        const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
        const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
        u32 n_hw_mode_caps;
        u32 tot_phy_id;
        struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
        struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
        u32 n_mac_phy_caps;
        const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
        const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
        u32 n_ext_hal_reg_caps;
        struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
        bool hw_mode_done;
        bool mac_phy_done;
        bool ext_hal_reg_done;
        bool mac_phy_chainmask_combo_done;
        bool mac_phy_chainmask_cap_done;
        bool oem_dma_ring_cap_done;
        bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
        u32 reg_db_version;
        u32 hw_min_max_tx_power_2ghz;
        u32 hw_min_max_tx_power_5ghz;
        u32 chwidth_num_peer_caps;
        u32 preamble_puncture_bw;
        u32 max_user_per_ppdu_ofdma;
        u32 max_user_per_ppdu_mumimo;
        u32 target_cap_flags;
        u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
        u32 max_num_linkview_peers;
        u32 max_num_msduq_supported_per_tid;
        u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
        struct ath12k_wmi_svc_rdy_ext2_arg arg;
        struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
        bool dma_ring_cap_done;
        bool spectral_bin_scaling_done;
        bool mac_phy_caps_ext_done;
        bool hal_reg_caps_ext2_done;
        bool scan_radio_caps_ext2_done;
        bool twt_caps_done;
        bool htt_msdu_idx_to_qtype_map_done;
        bool dbs_or_sbs_cap_ext_done;
};

struct ath12k_wmi_rdy_parse {
        u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
        struct ath12k_wmi_dma_buf_release_fixed_params fixed;
        const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
        const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
        u32 num_buf_entry;
        u32 num_meta;
        bool buf_entry_done;
        bool meta_data_done;
};

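/* Minimum-length policy for fixed-size TLVs; entries with a min_len of
 * zero (variable-length byte/u32 arrays) are exempt from the check in
 * the TLV iterator below.
 */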
struct ath12k_wmi_tlv_policy {
        size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
        const struct ath12k_wmi_mgmt_rx_params *fixed;
        const u8 *frame_buf;
        bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
        [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
        [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
        [WMI_TAG_SERVICE_READY_EVENT] = {
                .min_len = sizeof(struct wmi_service_ready_event) },
        [WMI_TAG_SERVICE_READY_EXT_EVENT] = {
                .min_len = sizeof(struct wmi_service_ready_ext_event) },
        [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
                .min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
        [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
                .min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
        [WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
                .min_len = sizeof(struct wmi_vdev_start_resp_event) },
        [WMI_TAG_PEER_DELETE_RESP_EVENT] = {
                .min_len = sizeof(struct wmi_peer_delete_resp_event) },
        [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
                .min_len = sizeof(struct wmi_bcn_tx_status_event) },
        [WMI_TAG_VDEV_STOPPED_EVENT] = {
                .min_len = sizeof(struct wmi_vdev_stopped_event) },
        [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
                .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
        [WMI_TAG_MGMT_RX_HDR] = {
                .min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
        [WMI_TAG_MGMT_TX_COMPL_EVENT] = {
                .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
        [WMI_TAG_SCAN_EVENT] = {
                .min_len = sizeof(struct wmi_scan_event) },
        [WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
                .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
        [WMI_TAG_ROAM_EVENT] = {
                .min_len = sizeof(struct wmi_roam_event) },
        [WMI_TAG_CHAN_INFO_EVENT] = {
                .min_len = sizeof(struct wmi_chan_info_event) },
        [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
                .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
        [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
                .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
        [WMI_TAG_READY_EVENT] = {
                .min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
        [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
                .min_len = sizeof(struct wmi_service_available_event) },
        [WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
                .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
        [WMI_TAG_RFKILL_EVENT] = {
                .min_len = sizeof(struct wmi_rfkill_state_change_event) },
        [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
                .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
        [WMI_TAG_HOST_SWFDA_EVENT] = {
                .min_len = sizeof(struct wmi_fils_discovery_event) },
        [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
                .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
        [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
                .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
        [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
                .min_len = sizeof(struct wmi_twt_enable_event) },
        [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
                .min_len = sizeof(struct wmi_twt_disable_event) },
        [WMI_TAG_P2P_NOA_INFO] = {
                .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
        [WMI_TAG_P2P_NOA_EVENT] = {
                .min_len = sizeof(struct wmi_p2p_noa_event) },
        [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
                .min_len = sizeof(struct wmi_11d_new_cc_event) },
        [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
                .min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
        [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
                .min_len = sizeof(struct wmi_obss_color_collision_event) },
};

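/* Build a TLV header: the tag and the payload length are packed into a
 * single little-endian 32-bit word via the WMI_TLV_TAG and WMI_TLV_LEN
 * field masks.
 */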
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
        return le32_encode_bits(cmd, WMI_TLV_TAG) |
               le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
        return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
                             struct ath12k_wmi_resource_config_arg *config)
{
        config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
        config->num_peers = ab->num_radios *
                            ath12k_core_get_max_peers_per_radio(ab);
        config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
        config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
        config->num_peer_keys = TARGET_NUM_PEER_KEYS;
        config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
        config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
        config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
        config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

        if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
                config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
        else
                config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

        config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
        config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
        config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
        config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
        config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
        config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
        config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
        config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
        config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
        config->dma_burst_size = TARGET_DMA_BURST_SIZE;
        config->rx_skip_defrag_timeout_dup_detection_check =
                TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
        config->vow_config = TARGET_VOW_CONFIG;
        config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
        config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
        config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
        config->rx_batchmode = TARGET_RX_BATCHMODE;
        /* Indicates host supports peer map v3 and unmap v2 */
        config->peer_map_unmap_version = 0x32;
        config->twt_ap_pdev_count = ab->num_radios;
        config->twt_ap_sta_count = 1000;
        config->ema_max_vap_cnt = ab->num_radios;
        config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
        config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

        if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
                config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
                             struct ath12k_wmi_resource_config_arg *config)
{
        config->num_vdevs = 4;
        config->num_peers = 16;
        config->num_tids = 32;

        config->num_offload_peers = 3;
        config->num_offload_reorder_buffs = 3;
        config->num_peer_keys = TARGET_NUM_PEER_KEYS;
        config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
        config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
        config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
        config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
        config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
        config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
        config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
        config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
        config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
        config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
        config->num_mcast_groups = 0;
        config->num_mcast_table_elems = 0;
        config->mcast2ucast_mode = 0;
        config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
        config->num_wds_entries = 0;
        config->dma_burst_size = 0;
        config->rx_skip_defrag_timeout_dup_detection_check = 0;
        config->vow_config = TARGET_VOW_CONFIG;
        config->gtk_offload_max_vdev = 2;
        config->num_msdu_desc = 0x400;
        config->beacon_tx_offload_max_vdev = 2;
        config->rx_batchmode = TARGET_RX_BATCHMODE;

        config->peer_map_unmap_version = 0x1;
        config->use_pdev_id = 1;
        config->max_frag_entries = 0xa;
        config->num_tdls_vdevs = 0x1;
        config->num_tdls_conn_table_entries = 8;
        config->beacon_tx_offload_max_vdev = 0x2;
        config->num_multicast_filter_entries = 0x20;
        config->num_wow_filters = 0x16;
        config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
        [_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
        PRIMAP(WMI_HOST_HW_MODE_SINGLE),
        PRIMAP(WMI_HOST_HW_MODE_DBS),
        PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
        PRIMAP(WMI_HOST_HW_MODE_SBS),
        PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
        PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
        /* keep last */
        PRIMAP(WMI_HOST_HW_MODE_MAX),
};

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
                    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
                                const void *ptr, void *data),
                    void *data)
{
        const void *begin = ptr;
        const struct wmi_tlv *tlv;
        u16 tlv_tag, tlv_len;
        int ret;

        while (len > 0) {
                if (len < sizeof(*tlv)) {
                        ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
                                   ptr - begin, len, sizeof(*tlv));
                        return -EINVAL;
                }

                tlv = ptr;
                tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
                tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
                ptr += sizeof(*tlv);
                len -= sizeof(*tlv);

                if (tlv_len > len) {
                        ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
                                   tlv_tag, ptr - begin, len, tlv_len);
                        return -EINVAL;
                }

                if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
                    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
                    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
                        ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
                                   tlv_tag, ptr - begin, tlv_len,
                                   ath12k_wmi_tlv_policies[tlv_tag].min_len);
                        return -EINVAL;
                }

                ret = iter(ab, tlv_tag, tlv_len, ptr, data);
                if (ret)
                        return ret;

                ptr += tlv_len;
                len -= tlv_len;
        }

        return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
                                     const void *ptr, void *data)
{
        const void **tb = data;

        if (tag < WMI_TAG_MAX)
                tb[tag] = ptr;

        return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
                                const void *ptr, size_t len)
{
        return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
                                   (void *)tb);
}

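/* Parse a WMI event skb into a table of TLV pointers indexed by tag.
 * The table is allocated with kcalloc() and must be freed by the caller.
 */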
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
                           struct sk_buff *skb, gfp_t gfp)
{
        const void **tb;
        int ret;

        tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
        if (!tb)
                return ERR_PTR(-ENOMEM);

        ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
        if (ret) {
                kfree(tb);
                return ERR_PTR(ret);
        }

        return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
                                      u32 cmd_id)
{
        struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
        struct ath12k_base *ab = wmi->wmi_ab->ab;
        struct wmi_cmd_hdr *cmd_hdr;
        int ret;

        if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
                return -ENOMEM;

        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

        memset(skb_cb, 0, sizeof(*skb_cb));
        ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

        if (ret)
                goto err_pull;

        return 0;

err_pull:
        skb_pull(skb, sizeof(struct wmi_cmd_hdr));
        return ret;
}

int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
                        u32 cmd_id)
{
        struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
        int ret = -EOPNOTSUPP;

        might_sleep();

        wait_event_timeout(wmi_ab->tx_credits_wq, ({
                ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

                if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
                        ret = -ESHUTDOWN;

                (ret != -EAGAIN);
        }), WMI_SEND_TIMEOUT_HZ);

        if (ret == -EAGAIN)
                ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

        return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
                                     const void *ptr,
                                     struct ath12k_wmi_service_ext_arg *arg)
{
        const struct wmi_service_ready_ext_event *ev = ptr;
        int i;

        if (!ev)
                return -EINVAL;

        /* Move this to host based bitmap */
        arg->default_conc_scan_config_bits =
                le32_to_cpu(ev->default_conc_scan_config_bits);
        arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
        arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
        arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
        arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
        arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
        arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

        for (i = 0; i < WMI_MAX_NUM_SS; i++)
                arg->ppet.ppet16_ppet8_ru3_ru0[i] =
                        le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

        return 0;
}

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
                                      struct ath12k_wmi_svc_rdy_ext_parse *svc,
                                      u8 hw_mode_id, u8 phy_id,
                                      struct ath12k_pdev *pdev)
{
        const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
        const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
        const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
        const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
        struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
        struct ath12k_band_cap *cap_band;
        struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
        struct ath12k_fw_pdev *fw_pdev;
        u32 phy_map;
        u32 hw_idx, phy_idx = 0;
        int i;

        if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
                return -EINVAL;

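        /* Walk the advertised hw modes to find hw_mode_id. For every mode
         * that is skipped, move phy_idx past the PHYs in its phy_id_map
         * (fls() of the map yields the index of the first PHY after it).
         */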
        for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
                if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
                        break;

                phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
                phy_idx = fls(phy_map);
        }

        if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
                return -EINVAL;

        phy_idx += phy_id;
        if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
                return -EINVAL;

        mac_caps = wmi_mac_phy_caps + phy_idx;

        pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
        pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
        pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
        pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

        fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
        fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
        fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
        fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
        ab->fw_pdev_count++;

        /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
         * band to band for a single radio, need to see how this should be
         * handled.
         */
        if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
                pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
                pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
        } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
                pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
                pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
                pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
                pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
                pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
                pdev_cap->nss_ratio_enabled =
                        WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
                pdev_cap->nss_ratio_info =
                        WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
        } else {
                return -EINVAL;
        }

        /* tx/rx chainmask reported from fw depends on the actual hw chains used,
         * For example, for 4x4 capable macphys, first 4 chains can be used for first
         * mac and the remaining 4 chains can be used for the second mac or vice-versa.
         * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
         * will be advertised for second mac or vice-versa. Compute the shift value
         * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
         * mac80211.
         */
        pdev_cap->tx_chain_mask_shift =
                find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
        pdev_cap->rx_chain_mask_shift =
                find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

        if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
                cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
                cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
                cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
                cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
                cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
                cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
                cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
                for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
                        cap_band->he_cap_phy_info[i] =
                                le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

                cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
                cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

                for (i = 0; i < WMI_MAX_NUM_SS; i++)
                        cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
                                le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
        }

        if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
                cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
                cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
                cap_band->max_bw_supported =
                        le32_to_cpu(mac_caps->max_bw_supported_5g);
                cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
                cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
                cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
                cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
                for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
                        cap_band->he_cap_phy_info[i] =
                                le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

                cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
                cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

                for (i = 0; i < WMI_MAX_NUM_SS; i++)
                        cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
                                le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

                cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
                cap_band->max_bw_supported =
                        le32_to_cpu(mac_caps->max_bw_supported_5g);
                cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
                cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
                cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
                cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
                for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
                        cap_band->he_cap_phy_info[i] =
                                le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

                cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
                cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

                for (i = 0; i < WMI_MAX_NUM_SS; i++)
                        cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
                                le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
        }

        return 0;
}

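/* Pull one PHY's regulatory capabilities (eeprom regulatory domain and
 * the supported 2 GHz/5 GHz frequency limits) out of the HAL reg caps
 * advertised in the extended service ready event.
 */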
static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
                                const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
                                const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
                                u8 phy_idx,
                                struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
        const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

        if (!reg_caps || !ext_caps)
                return -EINVAL;

        if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
                return -EINVAL;

        ext_reg_cap = &ext_caps[phy_idx];

        param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
        param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
        param->eeprom_reg_domain_ext =
                le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
        param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
        param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
        /* check if param->wireless_mode is needed */
        param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
        param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
        param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
        param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

        return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
                                         const void *evt_buf,
                                         struct ath12k_wmi_target_cap_arg *cap)
{
        const struct wmi_service_ready_event *ev = evt_buf;

        if (!ev) {
                ath12k_err(ab, "%s: failed due to NULL param\n",
                           __func__);
                return -EINVAL;
        }

        cap->phy_capability = le32_to_cpu(ev->phy_capability);
        cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
        cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
        cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
        cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
        cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
        cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
        cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
        cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
        cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
        cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
        cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
        cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
        cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
        cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
        cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
        cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

        return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
                                           const u32 *wmi_svc_bm)
{
        int i, j;

        for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
                do {
                        if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
                                set_bit(j, wmi->wmi_ab->svc_map);
                } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
        }
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
                                    const void *ptr, void *data)
{
        struct ath12k_wmi_svc_ready_parse *svc_ready = data;
        struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
        u16 expect_len;

        switch (tag) {
        case WMI_TAG_SERVICE_READY_EVENT:
                if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
                        return -EINVAL;
                break;

        case WMI_TAG_ARRAY_UINT32:
                if (!svc_ready->wmi_svc_bitmap_done) {
                        expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
                        if (len < expect_len) {
                                ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
                                            len, tag);
                                return -EINVAL;
                        }

                        ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

                        svc_ready->wmi_svc_bitmap_done = true;
                }
                break;
        default:
                break;
        }

        return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
        struct ath12k_wmi_svc_ready_parse svc_ready = { };
        int ret;

        ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
                                  ath12k_wmi_svc_rdy_parse,
                                  &svc_ready);
        if (ret) {
                ath12k_warn(ab, "failed to parse tlv %d\n", ret);
                return ret;
        }

        return 0;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
                                    struct ieee80211_tx_info *info)
{
        struct ath12k_base *ab = ar->ab;
        u32 freq = 0;

        if (ab->hw_params->single_pdev_only &&
            ar->scan.is_roc &&
            (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
                freq = ar->scan.roc_freq;

        return freq;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
        struct sk_buff *skb;
        struct ath12k_base *ab = wmi_ab->ab;
        u32 round_len = roundup(len, 4);

        skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
        if (!skb)
                return NULL;

        skb_reserve(skb, WMI_SKB_HEADROOM);
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath12k_warn(ab, "unaligned WMI skb data\n");

        skb_put(skb, round_len);
        memset(skb->data, 0, round_len);

        return skb;
}

int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
                         struct sk_buff *frame)
{
        struct ath12k *ar = arvif->ar;
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_mgmt_send_cmd *cmd;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
        struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
        int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
        struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
        struct ath12k_base *ab = ar->ab;
        struct wmi_tlv *frame_tlv, *tlv;
        struct ath12k_skb_cb *skb_cb;
        u32 buf_len, buf_len_aligned;
        u32 vdev_id = arvif->vdev_id;
        bool link_agnostic = false;
        struct sk_buff *skb;
        int ret, len;
        void *ptr;

        buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

        buf_len_aligned = roundup(buf_len, sizeof(u32));

        len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

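        /* For an MLD vif whose frame may go out link agnostically, firmware
         * additionally expects TX params and MLO params TLVs, so reserve
         * room for them in the command length computed above.
         */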
        if (ieee80211_vif_is_mld(vif)) {
                skb_cb = ATH12K_SKB_CB(frame);
                if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
                    ab->hw_params->hw_ops->is_frame_link_agnostic &&
                    ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
                        len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
                        ath12k_generic_dbg(ATH12K_DBG_MGMT,
                                           "Sending Mgmt Frame fc 0x%0x as link agnostic",
                                           mgmt->frame_control);
                        link_agnostic = true;
                }
        }

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_mgmt_send_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(vdev_id);
        cmd->desc_id = cpu_to_le32(buf_id);
        cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
        cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
        cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
        cmd->frame_len = cpu_to_le32(frame->len);
        cmd->buf_len = cpu_to_le32(buf_len);
        cmd->tx_params_valid = 0;

        frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
        frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);

        memcpy(frame_tlv->value, frame->data, buf_len);

        if (!link_agnostic)
                goto send;

        ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

        tlv = ptr;

        /* Tx params not used currently */
        tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
        ptr += cmd_len;

        tlv = ptr;
        tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
        ptr += TLV_HDR_SIZE;

        ml_params = ptr;
        ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
                                                       sizeof(*ml_params));

        ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);

send:
        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
                                      u32 vdev_id, u32 pdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_request_stats_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_request_stats_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
                                                 sizeof(*cmd));

        cmd->stats_id = cpu_to_le32(stats_id);
        cmd->vdev_id = cpu_to_le32(vdev_id);
        cmd->pdev_id = cpu_to_le32(pdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
                dev_kfree_skb(skb);
        }

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI request stats 0x%x vdev id %d pdev id %d\n",
                   stats_id, vdev_id, pdev_id);

        return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
                           struct ath12k_wmi_vdev_create_arg *args)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_vdev_create_cmd *cmd;
        struct sk_buff *skb;
        struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
        bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
        struct wmi_vdev_create_mlo_params *ml_params;
        struct wmi_tlv *tlv;
        int ret, len;
        void *ptr;

        /* It can be optimized by sending tx/rx chain configuration
         * only for supported bands instead of always sending it for
         * both the bands.
         */
        len = sizeof(*cmd) + TLV_HDR_SIZE +
              (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
              (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_vdev_create_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
                                                 sizeof(*cmd));

        cmd->vdev_id = cpu_to_le32(args->if_id);
        cmd->vdev_type = cpu_to_le32(args->type);
        cmd->vdev_subtype = cpu_to_le32(args->subtype);
        cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
        cmd->pdev_id = cpu_to_le32(args->pdev_id);
        cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
        cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
        cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
        ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

        if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
                cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

        ptr = skb->data + sizeof(*cmd);
        len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

        tlv = ptr;
        tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

        ptr += TLV_HDR_SIZE;
        txrx_streams = ptr;
        len = sizeof(*txrx_streams);
        txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
                                                          len);
        txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
        txrx_streams->supported_tx_streams =
                cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
        txrx_streams->supported_rx_streams =
                cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

        txrx_streams++;
        txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
                                                          len);
        txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
        txrx_streams->supported_tx_streams =
                cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
        txrx_streams->supported_rx_streams =
                cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

        ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

        if (is_ml_vdev) {
                tlv = ptr;
                tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
                                                 sizeof(*ml_params));
                ptr += TLV_HDR_SIZE;
                ml_params = ptr;

                ml_params->tlv_header =
                        ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
                                               sizeof(*ml_params));
                ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
        }

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
                   args->if_id, args->type, args->subtype,
                   macaddr, args->pdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to submit WMI_VDEV_CREATE_CMDID\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_vdev_delete_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_vdev_delete_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(vdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_vdev_stop_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_vdev_stop_cmd *)skb->data;

        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(vdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_vdev_down_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_vdev_down_cmd *)skb->data;

        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(vdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
                                       struct wmi_vdev_start_req_arg *arg)
{
        u32 center_freq1 = arg->band_center_freq1;

        memset(chan, 0, sizeof(*chan));

        chan->mhz = cpu_to_le32(arg->freq);
        chan->band_center_freq1 = cpu_to_le32(center_freq1);
        if (arg->mode == MODE_11BE_EHT320) {
                if (arg->freq > center_freq1)
                        chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
                else
                        chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

                chan->band_center_freq2 = cpu_to_le32(center_freq1);

        } else if (arg->mode == MODE_11BE_EHT160 ||
                   arg->mode == MODE_11AX_HE160) {
                if (arg->freq > center_freq1)
                        chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
                else
                        chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

                chan->band_center_freq2 = cpu_to_le32(center_freq1);
        } else {
                chan->band_center_freq2 = 0;
        }

        chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
        if (arg->passive)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
        if (arg->allow_ibss)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
        if (arg->allow_ht)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
        if (arg->allow_vht)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
        if (arg->allow_he)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
        if (arg->ht40plus)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
        if (arg->chan_radar)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
        if (arg->freq2_radar)
                chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

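        /* reg_info_1/reg_info_2 carry the per-channel power limits:
         * maximum tx power, regulatory maximum power and maximum
         * antenna gain.
         */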
        chan->reg_info_1 = le32_encode_bits(arg->max_power,
                                            WMI_CHAN_REG_INFO1_MAX_PWR) |
                le32_encode_bits(arg->max_reg_power,
                                 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

        chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
                                            WMI_CHAN_REG_INFO2_ANT_MAX) |
                le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
                          bool restart)
{
        struct wmi_vdev_start_mlo_params *ml_params;
        struct wmi_partner_link_info *partner_info;
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_vdev_start_request_cmd *cmd;
        struct sk_buff *skb;
        struct ath12k_wmi_channel_params *chan;
        struct wmi_tlv *tlv;
        void *ptr;
        int ret, len, i, ml_arg_size = 0;

        if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
                return -EINVAL;

        len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

        if (!restart && arg->ml.enabled) {
                ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
                              TLV_HDR_SIZE + (arg->ml.num_partner_links *
                                              sizeof(*partner_info));
                len += ml_arg_size;
        }
        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(arg->vdev_id);
        cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
        cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
        cmd->dtim_period = cpu_to_le32(arg->dtim_period);
        cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
        cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
        cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
        cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
        cmd->regdomain = cpu_to_le32(arg->regdomain);
        cmd->he_ops = cpu_to_le32(arg->he_ops);
        cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
        cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
        cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

        if (!restart) {
                if (arg->ssid) {
                        cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
                        memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
                }
                if (arg->hidden_ssid)
                        cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
                if (arg->pmf_enabled)
                        cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
        }

        cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

        ptr = skb->data + sizeof(*cmd);
        chan = ptr;

        ath12k_wmi_put_wmi_channel(chan, arg);

        chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
                                                  sizeof(*chan));
        ptr += sizeof(*chan);

        tlv = ptr;
        tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

        /* Note: This is a nested TLV containing:
         * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
         */

        ptr += sizeof(*tlv);

        if (ml_arg_size) {
                tlv = ptr;
                tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
                                                 sizeof(*ml_params));
                ptr += TLV_HDR_SIZE;

                ml_params = ptr;

                ml_params->tlv_header =
                        ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
                                               sizeof(*ml_params));

                ml_params->flags = le32_encode_bits(arg->ml.enabled,
                                                    ATH12K_WMI_FLAG_MLO_ENABLED) |
                                   le32_encode_bits(arg->ml.assoc_link,
                                                    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
                                   le32_encode_bits(arg->ml.mcast_link,
                                                    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
                                   le32_encode_bits(arg->ml.link_add,
                                                    ATH12K_WMI_FLAG_MLO_LINK_ADD);

                ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
                           arg->vdev_id, ml_params->flags);

                ptr += sizeof(*ml_params);

                tlv = ptr;
                tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
                                                 arg->ml.num_partner_links *
                                                 sizeof(*partner_info));
                ptr += TLV_HDR_SIZE;

                partner_info = ptr;

                for (i = 0; i < arg->ml.num_partner_links; i++) {
                        partner_info->tlv_header =
                                ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
                                                       sizeof(*partner_info));
                        partner_info->vdev_id =
                                cpu_to_le32(arg->ml.partner_info[i].vdev_id);
                        partner_info->hw_link_id =
                                cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
                        ether_addr_copy(partner_info->vdev_addr.addr,
                                        arg->ml.partner_info[i].addr);

                        ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
                                   partner_info->vdev_id, partner_info->hw_link_id,
                                   partner_info->vdev_addr.addr);

                        partner_info++;
                }

                ptr = partner_info;
        }

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
                   restart ? "restart" : "start", arg->vdev_id,
                   arg->freq, arg->mode);

        if (restart)
                ret = ath12k_wmi_cmd_send(wmi, skb,
                                          WMI_VDEV_RESTART_REQUEST_CMDID);
        else
                ret = ath12k_wmi_cmd_send(wmi, skb,
                                          WMI_VDEV_START_REQUEST_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
                            restart ? "restart" : "start");
                dev_kfree_skb(skb);
        }

        return ret;
}

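/* Move a started vdev to the up (active) state. For a non-transmitting
 * MBSSID profile, the transmitting BSSID and the profile index/count
 * are conveyed alongside the BSSID.
 */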
"restart" : "start"); 1305 dev_kfree_skb(skb); 1306 } 1307 1308 return ret; 1309 } 1310 1311 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) 1312 { 1313 struct ath12k_wmi_pdev *wmi = ar->wmi; 1314 struct wmi_vdev_up_cmd *cmd; 1315 struct sk_buff *skb; 1316 int ret; 1317 1318 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1319 if (!skb) 1320 return -ENOMEM; 1321 1322 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1323 1324 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, 1325 sizeof(*cmd)); 1326 cmd->vdev_id = cpu_to_le32(params->vdev_id); 1327 cmd->vdev_assoc_id = cpu_to_le32(params->aid); 1328 1329 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); 1330 1331 if (params->tx_bssid) { 1332 ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); 1333 cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); 1334 cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); 1335 } 1336 1337 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1338 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1339 params->vdev_id, params->aid, params->bssid); 1340 1341 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1342 if (ret) { 1343 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1344 dev_kfree_skb(skb); 1345 } 1346 1347 return ret; 1348 } 1349 1350 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, 1351 struct ath12k_wmi_peer_create_arg *arg) 1352 { 1353 struct ath12k_wmi_pdev *wmi = ar->wmi; 1354 struct wmi_peer_create_cmd *cmd; 1355 struct sk_buff *skb; 1356 int ret, len; 1357 struct wmi_peer_create_mlo_params *ml_param; 1358 void *ptr; 1359 struct wmi_tlv *tlv; 1360 1361 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param); 1362 1363 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1364 if (!skb) 1365 return -ENOMEM; 1366 1367 cmd = (struct wmi_peer_create_cmd *)skb->data; 1368 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD, 1369 sizeof(*cmd)); 1370 1371 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr); 1372 cmd->peer_type = cpu_to_le32(arg->peer_type); 1373 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1374 1375 ptr = skb->data + sizeof(*cmd); 1376 tlv = ptr; 1377 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1378 sizeof(*ml_param)); 1379 ptr += TLV_HDR_SIZE; 1380 ml_param = ptr; 1381 ml_param->tlv_header = 1382 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS, 1383 sizeof(*ml_param)); 1384 if (arg->ml_enabled) 1385 ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 1386 1387 ptr += sizeof(*ml_param); 1388 1389 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1390 "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n", 1391 arg->vdev_id, arg->peer_addr, ml_param->flags); 1392 1393 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1394 if (ret) { 1395 ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); 1396 dev_kfree_skb(skb); 1397 } 1398 1399 return ret; 1400 } 1401 1402 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, 1403 const u8 *peer_addr, u8 vdev_id) 1404 { 1405 struct ath12k_wmi_pdev *wmi = ar->wmi; 1406 struct wmi_peer_delete_cmd *cmd; 1407 struct sk_buff *skb; 1408 int ret; 1409 1410 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1411 if (!skb) 1412 return -ENOMEM; 1413 1414 cmd = (struct wmi_peer_delete_cmd *)skb->data; 1415 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD, 1416 sizeof(*cmd)); 1417 1418 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1419 cmd->vdev_id = cpu_to_le32(vdev_id); 1420 1421 ath12k_dbg(ar->ab, 
        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI peer delete vdev_id %d peer_addr %pM\n",
                   vdev_id, peer_addr);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
                                       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_pdev_set_regdomain_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
                                                 sizeof(*cmd));

        cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
        cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
        cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
        cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
        cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
        cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
        cmd->pdev_id = cpu_to_le32(arg->pdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
                   arg->current_rd_in_use, arg->current_rd_2g,
                   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
                              u32 vdev_id, u32 param_id, u32 param_val)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_peer_set_param_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_peer_set_param_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
                                                 sizeof(*cmd));
        ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
        cmd->vdev_id = cpu_to_le32(vdev_id);
        cmd->param_id = cpu_to_le32(param_id);
        cmd->param_value = cpu_to_le32(param_val);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI vdev %d peer 0x%pM set param %d value %d\n",
                   vdev_id, peer_addr, param_id, param_val);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
                                        u8 peer_addr[ETH_ALEN],
                                        u32 peer_tid_bitmap,
                                        u8 vdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_peer_flush_tids_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
                                                 sizeof(*cmd));

        ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
        cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
        cmd->vdev_id = cpu_to_le32(vdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
                   vdev_id, peer_addr, peer_tid_bitmap);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

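/* Point firmware at the DMA address of the per-peer, per-TID rx reorder
 * queue descriptor; the block-ack window size is meaningful only when
 * ba_window_size_valid is set.
 */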
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
                                           int vdev_id, const u8 *addr,
                                           dma_addr_t paddr, u8 tid,
                                           u8 ba_window_size_valid,
                                           u32 ba_window_size)
{
        struct wmi_peer_reorder_queue_setup_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
                                                 sizeof(*cmd));

        ether_addr_copy(cmd->peer_macaddr.addr, addr);
        cmd->vdev_id = cpu_to_le32(vdev_id);
        cmd->tid = cpu_to_le32(tid);
        cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
        cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
        cmd->queue_no = cpu_to_le32(tid);
        cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
        cmd->ba_window_size = cpu_to_le32(ba_window_size);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
                   addr, vdev_id, tid);

        ret = ath12k_wmi_cmd_send(ar->wmi, skb,
                                  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
                                 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_peer_reorder_queue_remove_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
                                                 sizeof(*cmd));

        ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
        cmd->vdev_id = cpu_to_le32(arg->vdev_id);
        cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
                   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

        ret = ath12k_wmi_cmd_send(wmi, skb,
                                  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
        if (ret) {
                ath12k_warn(ar->ab,
                            "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
                              u32 param_value, u8 pdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_pdev_set_param_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
                                                 sizeof(*cmd));
        cmd->pdev_id = cpu_to_le32(pdev_id);
        cmd->param_id = cpu_to_le32(param_id);
        cmd->param_value = cpu_to_le32(param_value);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI pdev set param %d pdev id %d value %d\n",
                   param_id, pdev_id, param_value);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_pdev_set_ps_mode_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
                                                 sizeof(*cmd));
        cmd->vdev_id = cpu_to_le32(vdev_id);
        cmd->sta_ps_mode = cpu_to_le32(enable);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI vdev set psmode %d vdev id %d\n",
                   enable, vdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
                            u32 pdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_pdev_suspend_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
                                                 sizeof(*cmd));

        cmd->suspend_opt = cpu_to_le32(suspend_opt);
        cmd->pdev_id = cpu_to_le32(pdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI pdev suspend pdev_id %d\n", pdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
        struct ath12k_wmi_pdev *wmi = ar->wmi;
        struct wmi_pdev_resume_cmd *cmd;
        struct sk_buff *skb;
        int ret;

        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
        if (!skb)
                return -ENOMEM;

        cmd = (struct wmi_pdev_resume_cmd *)skb->data;

        cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
                                                 sizeof(*cmd));
        cmd->pdev_id = cpu_to_le32(pdev_id);

        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
                   "WMI pdev resume pdev id %d\n", pdev_id);

        ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
        if (ret) {
                ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
                dev_kfree_skb(skb);
        }

        return ret;
}


/* TODO: FW support for this command is not available yet.
 * It can be tested once the command and the corresponding
 * event are implemented in FW.
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = cpu_to_le32(arg->param);
	cmd->value = cpu_to_le32(arg->value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
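
/* Illustrative usage sketch for ath12k_wmi_send_set_ap_ps_param_cmd()
 * above: the caller fills struct ath12k_wmi_ap_ps_arg first. The UAPSD
 * parameter id and the uapsd_queues value are assumptions for the example;
 * the vdev_id/param/value fields are the ones consumed above:
 *
 *	struct ath12k_wmi_ap_ps_arg arg = {};
 *
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
 *	arg.value = uapsd_queues;
 *	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
 */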

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_get_pdev_temperature_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
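
/* Illustrative usage sketch for ath12k_wmi_vdev_set_param_cmd() above.
 * The RTS threshold parameter id is assumed as the example here; the call
 * shape is the same for any per-vdev parameter:
 *
 *	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 *					    WMI_VDEV_PARAM_RTS_THRESHOLD,
 *					    value);
 */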

int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
			     const u8 *p2p_ie)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
	size_t p2p_ie_len, aligned_len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;

	p2p_ie_len = p2p_ie[1] + 2;
	aligned_len = roundup(p2p_ie_len, sizeof(u32));

	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);

	ptr += sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, p2p_ie, p2p_ie_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
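
/* Worked example for the length math above: a P2P IE whose length octet
 * (p2p_ie[1]) is 10 occupies 12 bytes including the two-byte IE header,
 * and roundup(12, sizeof(u32)) keeps it at 12, so the TLV carries no pad.
 * A length octet of 11 would give 13 bytes, padded to 16 in the TLV while
 * ie_buf_len still reports 13.
 */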

int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn,
			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = ar->ab;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct ieee80211_bss_conf *conf;
	u32 vdev_id = arvif->vdev_id;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 ema_params = 0;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);

	conf = ath12k_mac_get_link_bss_conf(arvif);
	if (!conf) {
		ath12k_warn(ab,
			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
			    ahvif->vif->addr, arvif->link_id);
		return -EINVAL;
	}

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);

	if (conf->csa_active) {
		cmd->csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[0]);
		cmd->ext_csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[1]);
		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
	}

	cmd->buf_len = cpu_to_le32(bcn->len);
	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
	if (ema_args) {
		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
		if (ema_args->bcn_index == 0)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
		cmd->ema_params = cpu_to_le32(ema_params);
	}
	cmd->feature_enable_bitmap =
		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
					    WMI_BEACON_PROTECTION_EN_BIT));

	ptr = skb->data + sizeof(*cmd);

	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_install_key(struct ath12k *ar,
				struct wmi_vdev_install_key_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret, len, key_len_aligned;

	/* WMI_TAG_ARRAY_BYTE needs to be 4-byte aligned; the actual key
	 * length is specified in cmd->key_len.
	 */
	key_len_aligned = roundup(arg->key_len, 4);

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = cpu_to_le32(arg->key_idx);
	cmd->key_flags = cpu_to_le32(arg->key_flags);
	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
	cmd->key_len = cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

	if (arg->key_rsc_counter)
		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
	memcpy(tlv->value, arg->key_data, arg->key_len);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
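
/* Worked example for key_len_aligned above: a 13-byte key is carried in a
 * 16-byte WMI_TAG_ARRAY_BYTE TLV (roundup(13, 4) == 16) while cmd->key_len
 * still reports 13, so firmware ignores the three pad bytes.
 */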

static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
				       struct ath12k_wmi_peer_assoc_arg *arg,
				       bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;
	cmd->peer_flags_ext = 0;

	if (arg->is_wme_set) {
		if (arg->qos_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
		if (arg->apsd_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
		if (arg->ht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
		if (arg->bw_40)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
		if (arg->bw_80)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
		if (arg->bw_160)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
		if (arg->bw_320)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->stbc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->ldpc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);

		if (arg->static_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
		if (arg->dynamic_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
		if (arg->spatial_mux_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
		if (arg->vht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
		if (arg->he_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
		if (arg->twt_requester)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
		if (arg->twt_responder)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
		if (arg->eht_flag)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (arg->auth_flag)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
	if (arg->need_ptk_4_way) {
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
		if (!hw_crypto_disabled && arg->is_assoc)
			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
	}
	if (arg->need_gtk_2_way)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
	/* safe mode bypasses the 4-way handshake */
	if (arg->safe_mode_enabled)
		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
						 WMI_PEER_NEED_GTK_2_WAY));

	if (arg->is_pmf_enabled)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);

	/* Disable AMSDU for station transmit, if the user configures it.
	 * Disable AMSDU for AP transmit to 11n stations, if the user
	 * configures it:
	 * if (arg->amsdu_disable) Add after FW support
	 */

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 */
	if (arg->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}

int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct ath12k_wmi_vht_rate_set_params *mcs;
	struct ath12k_wmi_he_rate_set_params *he_mcs;
	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
	struct wmi_peer_assoc_mlo_params *ml_params;
	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
	u32 peer_ht_rates_align, eml_trans_timeout;
	int i, ret, len;
	u16 eml_cap;
	__le32 v;

	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
					  sizeof(u32));
	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
				      sizeof(u32));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);

	if (arg->ml.enabled)
		len += TLV_HDR_SIZE + sizeof(*ml_params) +
		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
	else
		len += (2 * TLV_HDR_SIZE);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);

	ath12k_wmi_copy_peer_flags(cmd, arg,
				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);

	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
		cmd->peer_he_cap_phy[i] =
			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);

	/* Update 11be capabilities */
	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
		       0);
	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
		       0);
	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(ptr, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(ptr, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
						 sizeof(*mcs));

	cmd->peer_nss = cpu_to_le32(arg->peer_nss);

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);

	if (arg->vht_capable) {
		/* Firmware interprets the mcs->tx_mcs_set field as the peer's
		 * RX capability
		 */
		mcs->rx_max_rate = cpu_to_le32(arg->tx_max_rate);
		mcs->rx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
		mcs->tx_max_rate = cpu_to_le32(arg->rx_max_rate);
		mcs->tx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
	}

	/* HE Rates */
	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);

	ptr += sizeof(*mcs);

	len = arg->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < arg->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							    sizeof(*he_mcs));

		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
		ptr += sizeof(*he_mcs);
	}

	tlv = ptr;
	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;
	if (!len)
		goto skip_ml_params;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
						       len);
	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	if (arg->ml.assoc_link)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

	if (arg->ml.primary_umac)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

	if (arg->ml.logical_link_idx_valid)
		ml_params->flags |=
			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);

	if (arg->ml.peer_id_valid)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);

	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);

	eml_cap = arg->ml.eml_cap;
	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT);
		/* Padding delay */
		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
		/* Transition delay */
		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
		/* Transition timeout */
		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
		ml_params->emlsr_trans_timeout_us =
			cpu_to_le32(eml_trans_timeout);
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
			   eml_trans_timeout);
	}

	ptr += sizeof(*ml_params);

skip_ml_params:
	/* Loop through the EHT rate set */
	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
		eht_mcs = ptr;
		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
							     sizeof(*eht_mcs));

		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
		ptr += sizeof(*eht_mcs);
	}

	/* Update MCS15 capability */
	if (arg->eht_disable_mcs15)
		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);

	tlv = ptr;
	len = arg->ml.enabled ?
		arg->ml.num_partner_links * sizeof(*partner_info) : 0;
	/* fill ML Partner links */
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	if (len == 0)
		goto send;

	for (i = 0; i < arg->ml.num_partner_links; i++) {
		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;

		partner_info = ptr;
		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
								  sizeof(*partner_info));
		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
		partner_info->hw_link_id =
			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

		if (arg->ml.partner_info[i].assoc_link)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

		if (arg->ml.partner_info[i].primary_umac)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

		if (arg->ml.partner_info[i].logical_link_idx_valid) {
			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
			partner_info->flags |= v;
		}

		partner_info->logical_link_idx =
			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
		ptr += sizeof(*partner_info);
	}

send:
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_ASSOC_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
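
/* For reference, a sketch of the TLV order built above (derived from the
 * code, not an authoritative wire spec):
 *
 *	WMI_TAG_PEER_ASSOC_COMPLETE_CMD   fixed command struct
 *	WMI_TAG_ARRAY_BYTE                peer legacy rates
 *	WMI_TAG_ARRAY_BYTE                peer HT rates
 *	WMI_TAG_VHT_RATE_SET              VHT MCS set
 *	WMI_TAG_ARRAY_STRUCT              HE rate sets
 *	WMI_TAG_ARRAY_STRUCT              MLO params (zero length if disabled)
 *	WMI_TAG_ARRAY_STRUCT              EHT rate sets
 *	WMI_TAG_ARRAY_STRUCT              MLO partner links (zero length if disabled)
 */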

void ath12k_wmi_start_scan_init(struct ath12k *ar,
				struct ath12k_wmi_scan_req_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_active_2g = 0;
	arg->dwell_time_passive = 150;
	arg->dwell_time_active_6g = 70;
	arg->dwell_time_passive_6g = 70;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
				  WMI_SCAN_EVENT_COMPLETED |
				  WMI_SCAN_EVENT_BSS_CHANNEL |
				  WMI_SCAN_EVENT_FOREIGN_CHAN |
				  WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_f_chan_stat_evnt = 1;
	arg->num_bssid = 1;

	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
	 * zeros in the probe request
	 */
	eth_broadcast_addr(arg->bssid_list[0].addr);
}
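
/* Illustrative usage sketch: callers seed the request with the defaults
 * above and then override what they need before sending with
 * ath12k_wmi_send_scan_start_cmd() below. ATH12K_SCAN_ID is assumed here
 * as a typical scan id source:
 *
 *	struct ath12k_wmi_scan_req_arg arg = {};
 *
 *	ath12k_wmi_start_scan_init(ar, &arg);
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.scan_id = ATH12K_SCAN_ID;
 *	ret = ath12k_wmi_send_scan_start_cmd(ar, &arg);
 */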

static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
						   struct ath12k_wmi_scan_req_arg *arg)
{
	/* Scan events subscription */
	if (arg->scan_ev_started)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
	if (arg->scan_ev_completed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
	if (arg->scan_ev_bss_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
	if (arg->scan_ev_foreign_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
	if (arg->scan_ev_dequeued)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
	if (arg->scan_ev_preempted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
	if (arg->scan_ev_start_failed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
	if (arg->scan_ev_restarted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
	if (arg->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
	if (arg->scan_ev_suspended)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
	if (arg->scan_ev_resumed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);

	/* Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (arg->scan_f_passive)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
	if (arg->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
	if (arg->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
	if (arg->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
	if (arg->scan_f_half_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
	if (arg->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
	if (arg->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
	if (arg->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
	if (arg->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
	if (arg->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
	if (arg->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
	if (arg->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
	if (arg->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
	if (arg->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
	if (arg->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
	if (arg->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
	if (arg->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
						 WMI_SCAN_DWELL_MODE_MASK);
}

int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
				   struct ath12k_wmi_scan_req_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_start_scan_cmd *cmd;
	struct ath12k_wmi_ssid_params *ssid = NULL;
	struct ath12k_wmi_mac_addr_params *bssid;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u32 *tmp_ptr, extraie_len_with_pad = 0;
	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;

	len = sizeof(*cmd);

	len += TLV_HDR_SIZE;
	if (arg->num_chan)
		len += arg->num_chan * sizeof(u32);

	len += TLV_HDR_SIZE;
	if (arg->num_ssids)
		len += arg->num_ssids * sizeof(*ssid);

	len += TLV_HDR_SIZE;
	if (arg->num_bssid)
		len += sizeof(*bssid) * arg->num_bssid;

	if (arg->num_hint_bssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_bssid * sizeof(*hint_bssid);

	if (arg->num_hint_s_ssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_s_ssid * sizeof(*s_ssid);

	len += TLV_HDR_SIZE;
	if (arg->extraie.len)
		extraie_len_with_pad =
			roundup(arg->extraie.len, sizeof(u32));
	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
		len += extraie_len_with_pad;
	} else {
		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
			    arg->extraie.len);
		extraie_len_with_pad = 0;
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
						 sizeof(*cmd));

	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	if (ar->state_11d == ATH12K_11D_PREPARING)
		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
	else
		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);

	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);

	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
	cmd->num_chan = cpu_to_le32(arg->num_chan);
	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
	cmd->ie_len = cpu_to_le32(arg->extraie.len);
	cmd->n_probes = cpu_to_le32(arg->n_probes);

	ptr += sizeof(*cmd);

	len = arg->num_chan * sizeof(u32);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
	ptr += TLV_HDR_SIZE;
	tmp_ptr = (u32 *)ptr;

	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);

	ptr += len;

	len = arg->num_ssids * sizeof(*ssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;

	if (arg->num_ssids) {
		ssid = ptr;
		for (i = 0; i < arg->num_ssids; ++i) {
			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
			memcpy(ssid->ssid, arg->ssid[i].ssid,
			       arg->ssid[i].ssid_len);
			ssid++;
		}
	}

	ptr += (arg->num_ssids * sizeof(*ssid));
	len = arg->num_bssid * sizeof(*bssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	bssid = ptr;

	if (arg->num_bssid) {
		for (i = 0; i < arg->num_bssid; ++i) {
			ether_addr_copy(bssid->addr,
					arg->bssid_list[i].addr);
			bssid++;
		}
	}

	ptr += arg->num_bssid * sizeof(*bssid);

	len = extraie_len_with_pad;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
	ptr += TLV_HDR_SIZE;

	if (extraie_len_with_pad)
		memcpy(ptr, arg->extraie.ptr,
		       arg->extraie.len);

	ptr += extraie_len_with_pad;

	if (arg->num_hint_s_ssid) {
		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		s_ssid = ptr;
		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
			s_ssid++;
		}
		ptr += len;
	}

	if (arg->num_hint_bssid) {
		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		hint_bssid = ptr;
		for (i = 0; i < arg->num_hint_bssid; ++i) {
			hint_bssid->freq_flags =
				arg->hint_bssid[i].freq_flags;
			ether_addr_copy(&hint_bssid->bssid.addr[0],
					&arg->hint_bssid[i].bssid.addr[0]);
			hint_bssid++;
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_START_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
				  struct ath12k_wmi_scan_cancel_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->requestor = cpu_to_le32(arg->requester);
	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	/* stop the scan with the corresponding scan_id */
	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
	} else {
		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
			    arg->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
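
/* Illustrative usage sketch: cancelling one specific scan uses the same
 * argument struct consumed above:
 *
 *	struct ath12k_wmi_scan_cancel_arg arg = {};
 *
 *	arg.req_type = WLAN_SCAN_CANCEL_SINGLE;
 *	arg.scan_id = scan_id;
 *	arg.pdev_id = ar->pdev->pdev_id;
 *	ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
 */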

int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
				       struct ath12k_wmi_scan_chan_list_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan_info;
	struct ath12k_wmi_channel_arg *channel_arg;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	__le32 *reg1, *reg2;

	channel_arg = &arg->channel[0];
	while (arg->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		num_send_chans = min(arg->nallchans, max_chan_limit);

		arg->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
							 sizeof(*cmd));
		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
		if (num_sends)
			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 len);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
								       len);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);

			if (channel_arg->is_chan_passive)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
			if (channel_arg->allow_he)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
			else if (channel_arg->allow_vht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
			else if (channel_arg->allow_ht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
			if (channel_arg->half_rate)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
			if (channel_arg->quarter_rate)
				chan_info->info |=
					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);

			if (channel_arg->psc_channel)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);

			if (channel_arg->dfs_set)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);

			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
							    WMI_CHAN_INFO_MODE);
			*reg1 |= le32_encode_bits(channel_arg->minpower,
						  WMI_CHAN_REG_INFO1_MIN_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxpower,
						  WMI_CHAN_REG_INFO1_MAX_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
						  WMI_CHAN_REG_INFO1_REG_CLS);
			*reg2 |= le32_encode_bits(channel_arg->antennamax,
						  WMI_CHAN_REG_INFO2_ANT_MAX);
			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			channel_arg++;
		}

		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}

int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
				   struct wmi_wmm_params_all_arg *param)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->wmm_param_type = 0;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
					       sizeof(*wmm_param));

		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}
	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
			    const u8 *buf, size_t buf_len)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_interface_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	u32 len, len_aligned;
	int ret;

	len_aligned = roundup(buf_len, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->param_type_id = cpu_to_le32(param_id);
	cmd->length = cpu_to_le32(buf_len);

	ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
	ptr += TLV_HDR_SIZE;
	memcpy(ptr, buf, buf_len);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
			    param_id, ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
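
/* Worked example for the sizing above: a 10-byte BIOS blob gives
 * len_aligned = roundup(10, 4) = 12, so the message occupies
 * sizeof(*cmd) + TLV_HDR_SIZE + 12 bytes while cmd->length still
 * reports 10.
 */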

int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret;
	u8 *buf_ptr;
	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;

	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
					      sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
	      TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
					 sar_table_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);

	buf_ptr += sar_table_len_aligned;
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
					 sar_dbs_backoff_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret;
	u8 *buf_ptr;
	u32 len, sar_geo_len_aligned;
	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;

	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
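
/* Illustrative usage sketch for ath12k_wmi_addba_send() above: requesting
 * a block-ack session with a 64-MPDU window on TID 0 (values chosen for
 * the example only):
 *
 *	ret = ath12k_wmi_addba_send(ar, arvif->vdev_id, sta->addr, 0, 64);
 */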

int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
				     struct ath12k_wmi_init_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	switch (arg->flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
		cmd->cc_info.country_code =
			cpu_to_le32(arg->cc_info.country_code);
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY cmd: %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
					    struct wmi_set_current_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_set_current_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_current_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "set current country pdev id %d alpha2 %c%c\n",
		   ar->pdev->pdev_id,
		   arg->alpha2[0],
		   arg->alpha2[1]);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
				       struct wmi_11d_scan_start_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_11d_scan_start_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
				       sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
		   arg->vdev_id, arg->scan_period_msec,
		   arg->start_interval_msec);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_11d_scan_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
				       sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "send 11d scan stop vdev id %d\n",
		   cmd->vdev_id);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
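
/* Illustrative usage sketch for ath12k_wmi_send_11d_scan_start_cmd() above:
 * periods are in milliseconds and the values are examples only:
 *
 *	struct wmi_11d_scan_start_arg arg = {};
 *
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.scan_period_msec = 60 * 1000;
 *	arg.start_interval_msec = 5 * 1000;
 *	ret = ath12k_wmi_send_11d_scan_start_cmd(ar, &arg);
 */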
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
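
/* Program the OBSS packet-detect (spatial reuse) parameters for a vdev.
 * The min/max offsets are signed values, hence a_cpu_to_sle32().
 */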
int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = cpu_to_le32(he_obss_pd->enable);
	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
				 cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
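
/* The two template commands below carry a mac80211-built frame as a
 * byte-array TLV; the buffer length is rounded up to a multiple of 4
 * bytes as the TLV format requires.
 */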
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
3831 "unsolicited broadcast probe response" : "FILS discovery", 3832 interval); 3833 3834 len = sizeof(*cmd); 3835 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3836 if (!skb) 3837 return -ENOMEM; 3838 3839 cmd = (struct wmi_fils_discovery_cmd *)skb->data; 3840 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD, 3841 len); 3842 cmd->vdev_id = cpu_to_le32(vdev_id); 3843 cmd->interval = cpu_to_le32(interval); 3844 cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled); 3845 3846 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); 3847 if (ret) { 3848 ath12k_warn(ar->ab, 3849 "WMI vdev %i failed to send FILS discovery enable/disable command\n", 3850 vdev_id); 3851 dev_kfree_skb(skb); 3852 } 3853 return ret; 3854 } 3855 3856 static void 3857 ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb) 3858 { 3859 const struct wmi_obss_color_collision_event *ev; 3860 struct ath12k_link_vif *arvif; 3861 u32 vdev_id, evt_type; 3862 u64 bitmap; 3863 3864 const void **tb __free(kfree) = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 3865 if (IS_ERR(tb)) { 3866 ath12k_warn(ab, "failed to parse OBSS color collision tlv %ld\n", 3867 PTR_ERR(tb)); 3868 return; 3869 } 3870 3871 ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; 3872 if (!ev) { 3873 ath12k_warn(ab, "failed to fetch OBSS color collision event\n"); 3874 return; 3875 } 3876 3877 vdev_id = le32_to_cpu(ev->vdev_id); 3878 evt_type = le32_to_cpu(ev->evt_type); 3879 bitmap = le64_to_cpu(ev->obss_color_bitmap); 3880 3881 guard(rcu)(); 3882 3883 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 3884 if (!arvif) { 3885 ath12k_warn(ab, "no arvif found for vdev %u in OBSS color collision event\n", 3886 vdev_id); 3887 return; 3888 } 3889 3890 switch (evt_type) { 3891 case WMI_BSS_COLOR_COLLISION_DETECTION: 3892 ieee80211_obss_color_collision_notify(arvif->ahvif->vif, 3893 bitmap, 3894 arvif->link_id); 3895 ath12k_dbg(ab, ATH12K_DBG_WMI, 3896 "obss color collision detected vdev %u event %d bitmap %016llx\n", 3897 vdev_id, evt_type, bitmap); 3898 break; 3899 case WMI_BSS_COLOR_COLLISION_DISABLE: 3900 case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: 3901 case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: 3902 break; 3903 default: 3904 ath12k_warn(ab, "unknown OBSS color collision event type %d\n", evt_type); 3905 } 3906 } 3907 3908 static void 3909 ath12k_fill_band_to_mac_param(struct ath12k_base *soc, 3910 struct ath12k_wmi_pdev_band_arg *arg) 3911 { 3912 u8 i; 3913 struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap; 3914 struct ath12k_pdev *pdev; 3915 3916 for (i = 0; i < soc->num_radios; i++) { 3917 pdev = &soc->pdevs[i]; 3918 hal_reg_cap = &soc->hal_reg_cap[i]; 3919 arg[i].pdev_id = pdev->pdev_id; 3920 3921 switch (pdev->cap.supported_bands) { 3922 case WMI_HOST_WLAN_2GHZ_5GHZ_CAP: 3923 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3924 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3925 break; 3926 case WMI_HOST_WLAN_2GHZ_CAP: 3927 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3928 arg[i].end_freq = hal_reg_cap->high_2ghz_chan; 3929 break; 3930 case WMI_HOST_WLAN_5GHZ_CAP: 3931 arg[i].start_freq = hal_reg_cap->low_5ghz_chan; 3932 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3933 break; 3934 default: 3935 break; 3936 } 3937 } 3938 } 3939 3940 static void 3941 ath12k_wmi_copy_resource_config(struct ath12k_base *ab, 3942 struct ath12k_wmi_resource_config_params *wmi_cfg, 3943 struct ath12k_wmi_resource_config_arg *tg_cfg) 3944 { 3945 wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs); 3946 
static void
ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
				struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
						  WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	if (ab->hw_params->reoq_lut_support)
		wmi_cfg->host_service_flags |=
			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
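
/* Build WMI_INIT_CMDID: the fixed resource config TLV, the host memory
 * chunk array and, when a HW mode is preferred, the HW mode TLV with its
 * band-to-mac mapping.
 */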
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;
	int ret;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
						 sizeof(*cmd));

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);

	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
						 sizeof(*cfg));

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);

	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
					   len);

		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   arg->mem_chunks[idx].req_id,
			   (u64)arg->mem_chunks[idx].paddr,
			   arg->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;

	/* num_mem_chunks is zero */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE + len;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
							     sizeof(*hw_mode));

		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);

		ptr += sizeof(*hw_mode);

		len = arg->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
						       len);
			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
			band_to_mac->start_freq =
				cpu_to_le32(arg->band_to_mac[idx].start_freq);
			band_to_mac->end_freq =
				cpu_to_le32(arg->band_to_mac[idx].end_freq);
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
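
/* Configure LRO for a pdev; th_4/th_6 are random hash seeds handed to
 * firmware for IPv4 and IPv6 flows respectively.
 */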
int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
			    int pdev_id)
{
	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
						 sizeof(*cmd));

	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send lro cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
						 sizeof(*cmd));

	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = cpu_to_le32(mode);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
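
/* Assemble the init command argument from hw_params and the service
 * bitmap, then send WMI_INIT_CMDID via ath12k_init_cmd_send().
 */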
int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	ab->hw_params->wmi_init(ab, &arg.res_cfg);
	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;

	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
	arg.mem_chunks = wmi_ab->mem_chunks;

	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;

	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}

int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->trigger_cmd = cpu_to_le32(trigger);
	cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	cmd->module_id = cpu_to_le32(arg->module_id);
	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
	cmd->num_elems = cpu_to_le32(arg->num_elems);
	cmd->buf_size = cpu_to_le32(arg->buf_size);
	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   arg->pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}
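
/* TLV parsers for the DMA buffer release event: the fixed params carry
 * the expected entry/metadata counts, which the two sub-parsers below
 * enforce while walking the TLV arrays.
 */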
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
		return -ENOBUFS;

	arg->num_buf_entry++;
	return 0;
}

static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}

static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32 pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!arg->buf_entry_done) {
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
						       struct sk_buff *skb)
{
	struct ath12k_wmi_dma_buf_release_arg arg = {};
	struct ath12k_dbring_buf_release_event param;
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_dma_buf_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = arg.fixed;
	param.buf_entry = arg.buf_entry;
	param.num_buf_entry = arg.num_buf_entry;
	param.meta_data = arg.meta_data;
	param.num_meta = arg.num_meta;

	ret = ath12k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}

static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}

static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);

		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;

	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);

	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}

static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath12k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}
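
/* Parse the DMA ring capability array and cache it in ab->db_caps for
 * direct buffer ring setup; a repeated event is ignored.
 */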
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

static void
ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
{
	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
	mac_phy_info->hw_freq_range.low_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.low_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
}

static void
ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
{
	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
	u32 hw_mode_id, phy_bit_map;
	u8 hw_idx;

	mac_phy_info = &svc_ext_info->mac_phy_info[0];
	mac_phy_cap = svc_rdy_ext->mac_phy_caps;

	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);

		while (phy_bit_map) {
			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
			mac_phy_info->hw_mode_config_type =
				le32_get_bits(hw_mode_cap->hw_mode_config_type,
					      WMI_HW_MODE_CAP_CFG_TYPE);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
				   hw_idx, hw_mode_id,
				   mac_phy_info->hw_mode_config_type,
				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
				   mac_phy_info->hw_freq_range.low_2ghz_freq,
				   mac_phy_info->hw_freq_range.high_2ghz_freq,
				   mac_phy_info->hw_freq_range.low_5ghz_freq,
				   mac_phy_info->hw_freq_range.high_5ghz_freq);

			mac_phy_cap++;
			mac_phy_info++;

			phy_bit_map >>= 1;
		}
	}
}

static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}
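
/* Handle WMI_SERVICE_READY_EXT_EVENTID. service_ready is completed here
 * only when firmware does not support the EXT2 message; otherwise
 * completion is deferred to the EXT2 event handler.
 */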
static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_ext_parse,
				  &svc_rdy_ext);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		goto err;
	}

	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
		complete(&ab->wmi_ab.service_ready);

	kfree(svc_rdy_ext.mac_phy_caps);
	return 0;

err:
	kfree(svc_rdy_ext.mac_phy_caps);
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
				      const void *ptr,
				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
{
	const struct wmi_service_ready_ext2_event *ev = ptr;

	if (!ev)
		return -EINVAL;

	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
	return 0;
}

static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				      __le32 cap_info_internal)
{
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	u32 support_320mhz;
	u8 i;

	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
				 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}

static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
				      const struct ath12k_wmi_caps_ext_params *caps,
				      struct ath12k_pdev *pdev)
{
	struct ath12k_band_cap *cap_band;
	u32 bands, support_320mhz;
	int i;

	if (ab->hw_params->single_pdev_only) {
		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
					 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
			cap_band->eht_cap_phy_info[0] |= support_320mhz;
			return 0;
		}

		for (i = 0; i < ab->fw_pdev_count; i++) {
			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];

			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
				bands = fw_pdev->supported_bands;
				break;
			}
		}

		if (i == ab->fw_pdev_count)
			return -EINVAL;
	} else {
		bands = pdev->cap.supported_bands;
	}

	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
					  caps->eht_cap_mac_info_2ghz,
					  caps->eht_cap_phy_info_2ghz,
					  caps->eht_supp_mcs_ext_2ghz,
					  &caps->eht_ppet_2ghz,
					  caps->eht_cap_info_internal);
	}

	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);

		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);
	}

	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);

	return 0;
}

static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
					   u16 len, const void *ptr,
					   void *data)
{
	const struct ath12k_wmi_caps_ext_params *caps = ptr;
	int i = 0, ret;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
		return -EPROTO;

	if (ab->hw_params->single_pdev_only) {
		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
			return 0;
	} else {
		for (i = 0; i < ab->num_radios; i++) {
			if (ab->pdevs[i].pdev_id ==
			    ath12k_wmi_caps_ext_get_pdev_id(caps))
				break;
		}

		if (i == ab->num_radios)
			return -EINVAL;
	}

	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
	if (ret) {
		ath12k_warn(ab,
			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
			    ab->pdevs[i].pdev_id, ret);
		return ret;
	}

	return 0;
}
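
/* The helpers below build the per-HW-mode (SMM/DBS/SBS) frequency range
 * table from the reported MAC/PHY capabilities so the band split for
 * each mode can be looked up later.
 */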
ath12k_hw_mode_freq_range_arg *freq_range;
	u8 i;

	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
	}
}

static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
	u8 shared_phy_id;

	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];

	/* if the SBS mac range has both 2.4 and 5 GHz ranges, i.e. it is the
	 * shared phy_id, keep the range as it is in SBS
	 */
	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
		return 0;

	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4 GHz");
		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
		return -EINVAL;
	}

	non_shared_range = sbs_mac_range;
	/* if the SBS mac range has only 5 GHz then it's the non-shared phy, so
	 * modify the range as per the shared mac.
	 */
	shared_phy_id = phy_id ? 0 : 1;
	shared_mac_range =
		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];

	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
		/* If the shared mac lower 5 GHz frequency is greater than the
		 * non-shared mac lower 5 GHz frequency, then the shared mac has
		 * the high 5 GHz range shared with 2.4 GHz. So the non-shared
		 * mac's 5 GHz high freq should be less than the shared mac's
		 * low 5 GHz freq.
		 */
		if (non_shared_range->high_5ghz_freq >=
		    shared_mac_range->low_5ghz_freq)
			non_shared_range->high_5ghz_freq =
				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
				      non_shared_range->low_5ghz_freq);
	} else if (shared_mac_range->high_5ghz_freq <
		   non_shared_range->high_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
		/* If the shared mac high 5 GHz frequency is less than the
		 * non-shared mac high 5 GHz frequency, then the shared mac has
		 * the low 5 GHz range shared with 2.4 GHz. So the non-shared
		 * mac's 5 GHz low freq should be greater than the shared mac's
		 * high 5 GHz freq.
		 */
		if (shared_mac_range->high_5ghz_freq >=
		    non_shared_range->low_5ghz_freq)
			non_shared_range->low_5ghz_freq =
				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
				      non_shared_range->high_5ghz_freq);
	} else {
		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
		return -EINVAL;
	}

	return 0;
}

static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *mac_range;
	u16 sbs_range_sep;
	u8 phy_id;
	int ret;

	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];

	/* If sbs_lower_band_end_freq has a value, then the frequency range
	 * will be split using that value.
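	 *
	 * As a hypothetical example, with a cutoff of 5720 MHz and firmware
	 * reporting a 5 GHz range of [5180 - 5885]: the mac that shares
	 * 2.4 GHz gets 5 GHz [5180 - 5720] in the lower-share mode and
	 * [5730 - 5885] in the upper-share mode, while the other mac gets
	 * the complementary half; the +10 in the fill helpers keeps the two
	 * halves 10 MHz apart.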
	 */
	sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
	if (sbs_range_sep) {
		ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
						     mac_range);
		ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
						     mac_range);
		/* Hardware specifies the range boundary with sbs_range_sep
		 * (i.e. the boundary between 5 GHz high and 5 GHz low);
		 * reset the original one to make sure it will not get used.
		 */
		memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
		return;
	}

	/* If sbs_lower_band_end_freq is not set, firmware will send one
	 * shared mac range and one non-shared mac range, so update those
	 * frequencies.
	 */
	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
		if (ret) {
			memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
			break;
		}
	}
}

static void
ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
				enum wmi_host_hw_mode_config_type hw_config_type,
				u32 phy_id,
				struct ath12k_svc_ext_mac_phy_info *mac_cap)
{
	if (phy_id >= MAX_RADIOS) {
		ath12k_err(ab, "more than two macs are not supported: %d", phy_id);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
		   hw_config_type, phy_id, mac_cap->supported_bands,
		   ab->wmi_ab.sbs_lower_band_end_freq,
		   mac_cap->hw_freq_range.low_2ghz_freq,
		   mac_cap->hw_freq_range.high_2ghz_freq,
		   mac_cap->hw_freq_range.low_5ghz_freq,
		   mac_cap->hw_freq_range.high_5ghz_freq);

	switch (hw_config_type) {
	case WMI_HOST_HW_MODE_SINGLE:
		if (phy_id) {
			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
			break;
		}
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
		break;

	case WMI_HOST_HW_MODE_DBS:
		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_freq_info(ab, mac_cap,
						    ATH12K_HW_MODE_DBS, phy_id);
		break;
	case WMI_HOST_HW_MODE_DBS_SBS:
	case WMI_HOST_HW_MODE_DBS_OR_SBS:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
		if (ab->wmi_ab.sbs_lower_band_end_freq ||
		    mac_cap->hw_freq_range.low_5ghz_freq ||
		    mac_cap->hw_freq_range.low_2ghz_freq)
			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
						    phy_id);

		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_dbs_freq_info(ab);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);
		break;
	case WMI_HOST_HW_MODE_SBS:
	case WMI_HOST_HW_MODE_SBS_PASSIVE:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);

		break;
	default:
		break;
	}
}

static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
{
	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
	    (ab->wmi_ab.sbs_lower_band_end_freq &&
	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
		return true;

	return false;
}

static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
{
	struct ath12k_svc_ext_info *svc_ext_info =
&ab->wmi_ab.svc_ext_info; 5494 struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info; 5495 enum wmi_host_hw_mode_config_type hw_config_type; 5496 struct ath12k_svc_ext_mac_phy_info *tmp; 5497 bool dbs_mode = false, sbs_mode = false; 5498 u32 i, j = 0; 5499 5500 if (!svc_ext_info->num_hw_modes) { 5501 ath12k_err(ab, "invalid number of hw modes"); 5502 return -EINVAL; 5503 } 5504 5505 ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d", 5506 svc_ext_info->num_hw_modes); 5507 5508 memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps)); 5509 5510 for (i = 0; i < svc_ext_info->num_hw_modes; i++) { 5511 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5512 return -EINVAL; 5513 5514 /* Update for MAC0 */ 5515 tmp = &svc_ext_info->mac_phy_info[j++]; 5516 hw_config_type = tmp->hw_mode_config_type; 5517 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp); 5518 5519 /* SBS and DBS have dual MAC. Up to 2 MACs are considered. */ 5520 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5521 hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5522 hw_config_type == WMI_HOST_HW_MODE_SBS || 5523 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) { 5524 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5525 return -EINVAL; 5526 /* Update for MAC1 */ 5527 tmp = &svc_ext_info->mac_phy_info[j++]; 5528 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, 5529 tmp->phy_id, tmp); 5530 5531 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5532 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) 5533 dbs_mode = true; 5534 5535 if (ath12k_wmi_sbs_range_present(ab) && 5536 (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5537 hw_config_type == WMI_HOST_HW_MODE_SBS || 5538 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)) 5539 sbs_mode = true; 5540 } 5541 } 5542 5543 info->support_dbs = dbs_mode; 5544 info->support_sbs = sbs_mode; 5545 5546 ath12k_wmi_dump_freq_range(ab); 5547 5548 return 0; 5549 } 5550 5551 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, 5552 u16 tag, u16 len, 5553 const void *ptr, void *data) 5554 { 5555 const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps; 5556 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 5557 struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; 5558 int ret; 5559 5560 switch (tag) { 5561 case WMI_TAG_SERVICE_READY_EXT2_EVENT: 5562 ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr, 5563 &parse->arg); 5564 if (ret) { 5565 ath12k_warn(ab, 5566 "failed to extract wmi service ready ext2 parameters: %d\n", 5567 ret); 5568 return ret; 5569 } 5570 break; 5571 5572 case WMI_TAG_ARRAY_STRUCT: 5573 if (!parse->dma_ring_cap_done) { 5574 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 5575 &parse->dma_caps_parse); 5576 if (ret) 5577 return ret; 5578 5579 parse->dma_ring_cap_done = true; 5580 } else if (!parse->spectral_bin_scaling_done) { 5581 /* TODO: This is a place-holder as WMI tag for 5582 * spectral scaling is before 5583 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT 5584 */ 5585 parse->spectral_bin_scaling_done = true; 5586 } else if (!parse->mac_phy_caps_ext_done) { 5587 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 5588 ath12k_wmi_tlv_mac_phy_caps_ext, 5589 parse); 5590 if (ret) { 5591 ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n", 5592 ret); 5593 return ret; 5594 } 5595 5596 parse->mac_phy_caps_ext_done = true; 5597 } else if (!parse->hal_reg_caps_ext2_done) { 5598 parse->hal_reg_caps_ext2_done = true; 5599 } else if (!parse->scan_radio_caps_ext2_done) { 5600 parse->scan_radio_caps_ext2_done = true; 5601 } else if (!parse->twt_caps_done) { 
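			/* TWT caps are not parsed by the host yet; mark this
			 * array as consumed so the positional TLV walk stays
			 * aligned with the firmware's event layout.
			 */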
5602 parse->twt_caps_done = true; 5603 } else if (!parse->htt_msdu_idx_to_qtype_map_done) { 5604 parse->htt_msdu_idx_to_qtype_map_done = true; 5605 } else if (!parse->dbs_or_sbs_cap_ext_done) { 5606 dbs_or_sbs_caps = ptr; 5607 ab->wmi_ab.sbs_lower_band_end_freq = 5608 __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq); 5609 5610 ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n", 5611 ab->wmi_ab.sbs_lower_band_end_freq); 5612 5613 ret = ath12k_wmi_update_hw_mode_list(ab); 5614 if (ret) { 5615 ath12k_warn(ab, "failed to update hw mode list: %d\n", 5616 ret); 5617 return ret; 5618 } 5619 5620 parse->dbs_or_sbs_cap_ext_done = true; 5621 } 5622 5623 break; 5624 default: 5625 break; 5626 } 5627 5628 return 0; 5629 } 5630 5631 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab, 5632 struct sk_buff *skb) 5633 { 5634 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { }; 5635 int ret; 5636 5637 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 5638 ath12k_wmi_svc_rdy_ext2_parse, 5639 &svc_rdy_ext2); 5640 if (ret) { 5641 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); 5642 goto err; 5643 } 5644 5645 complete(&ab->wmi_ab.service_ready); 5646 5647 return 0; 5648 5649 err: 5650 ath12k_wmi_free_dbring_caps(ab); 5651 return ret; 5652 } 5653 5654 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, 5655 struct wmi_vdev_start_resp_event *vdev_rsp) 5656 { 5657 const void **tb; 5658 const struct wmi_vdev_start_resp_event *ev; 5659 int ret; 5660 5661 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5662 if (IS_ERR(tb)) { 5663 ret = PTR_ERR(tb); 5664 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5665 return ret; 5666 } 5667 5668 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; 5669 if (!ev) { 5670 ath12k_warn(ab, "failed to fetch vdev start resp ev"); 5671 kfree(tb); 5672 return -EPROTO; 5673 } 5674 5675 *vdev_rsp = *ev; 5676 5677 kfree(tb); 5678 return 0; 5679 } 5680 5681 static struct ath12k_reg_rule 5682 *create_ext_reg_rules_from_wmi(u32 num_reg_rules, 5683 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) 5684 { 5685 struct ath12k_reg_rule *reg_rule_ptr; 5686 u32 count; 5687 5688 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), 5689 GFP_ATOMIC); 5690 5691 if (!reg_rule_ptr) 5692 return NULL; 5693 5694 for (count = 0; count < num_reg_rules; count++) { 5695 reg_rule_ptr[count].start_freq = 5696 le32_get_bits(wmi_reg_rule[count].freq_info, 5697 REG_RULE_START_FREQ); 5698 reg_rule_ptr[count].end_freq = 5699 le32_get_bits(wmi_reg_rule[count].freq_info, 5700 REG_RULE_END_FREQ); 5701 reg_rule_ptr[count].max_bw = 5702 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5703 REG_RULE_MAX_BW); 5704 reg_rule_ptr[count].reg_power = 5705 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5706 REG_RULE_REG_PWR); 5707 reg_rule_ptr[count].ant_gain = 5708 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5709 REG_RULE_ANT_GAIN); 5710 reg_rule_ptr[count].flags = 5711 le32_get_bits(wmi_reg_rule[count].flag_info, 5712 REG_RULE_FLAGS); 5713 reg_rule_ptr[count].psd_flag = 5714 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5715 REG_RULE_PSD_INFO); 5716 reg_rule_ptr[count].psd_eirp = 5717 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5718 REG_RULE_PSD_EIRP); 5719 } 5720 5721 return reg_rule_ptr; 5722 } 5723 5724 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule, 5725 u32 num_reg_rules) 5726 { 5727 u8 num_invalid_5ghz_rules = 0; 5728 u32 count, start_freq; 5729 5730 for (count = 0; count < 
num_reg_rules; count++) { 5731 start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); 5732 5733 if (start_freq >= ATH12K_MIN_6GHZ_FREQ) 5734 num_invalid_5ghz_rules++; 5735 } 5736 5737 return num_invalid_5ghz_rules; 5738 } 5739 5740 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, 5741 struct sk_buff *skb, 5742 struct ath12k_reg_info *reg_info) 5743 { 5744 const void **tb; 5745 const struct wmi_reg_chan_list_cc_ext_event *ev; 5746 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; 5747 u32 num_2g_reg_rules, num_5g_reg_rules; 5748 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 5749 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; 5750 u8 num_invalid_5ghz_ext_rules; 5751 u32 total_reg_rules = 0; 5752 int ret, i, j; 5753 5754 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 5755 5756 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5757 if (IS_ERR(tb)) { 5758 ret = PTR_ERR(tb); 5759 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5760 return ret; 5761 } 5762 5763 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; 5764 if (!ev) { 5765 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); 5766 kfree(tb); 5767 return -EPROTO; 5768 } 5769 5770 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); 5771 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); 5772 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = 5773 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); 5774 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = 5775 le32_to_cpu(ev->num_6g_reg_rules_ap_sp); 5776 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = 5777 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); 5778 5779 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5780 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5781 le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]); 5782 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5783 le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]); 5784 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 5785 le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]); 5786 } 5787 5788 num_2g_reg_rules = reg_info->num_2g_reg_rules; 5789 total_reg_rules += num_2g_reg_rules; 5790 num_5g_reg_rules = reg_info->num_5g_reg_rules; 5791 total_reg_rules += num_5g_reg_rules; 5792 5793 if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) { 5794 ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n", 5795 num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES); 5796 kfree(tb); 5797 return -EINVAL; 5798 } 5799 5800 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 5801 num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i]; 5802 5803 if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { 5804 ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n", 5805 i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES); 5806 kfree(tb); 5807 return -EINVAL; 5808 } 5809 5810 total_reg_rules += num_6g_reg_rules_ap[i]; 5811 } 5812 5813 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5814 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5815 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5816 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5817 5818 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5819 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5820 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5821 5822 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 
5823 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5824 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5825 5826 if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES || 5827 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES || 5828 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) { 5829 ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n", 5830 i); 5831 kfree(tb); 5832 return -EINVAL; 5833 } 5834 } 5835 5836 if (!total_reg_rules) { 5837 ath12k_warn(ab, "No reg rules available\n"); 5838 kfree(tb); 5839 return -EINVAL; 5840 } 5841 5842 memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); 5843 5844 reg_info->dfs_region = le32_to_cpu(ev->dfs_region); 5845 reg_info->phybitmap = le32_to_cpu(ev->phybitmap); 5846 reg_info->num_phy = le32_to_cpu(ev->num_phy); 5847 reg_info->phy_id = le32_to_cpu(ev->phy_id); 5848 reg_info->ctry_code = le32_to_cpu(ev->country_id); 5849 reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code); 5850 5851 switch (le32_to_cpu(ev->status_code)) { 5852 case WMI_REG_SET_CC_STATUS_PASS: 5853 reg_info->status_code = REG_SET_CC_STATUS_PASS; 5854 break; 5855 case WMI_REG_CURRENT_ALPHA2_NOT_FOUND: 5856 reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; 5857 break; 5858 case WMI_REG_INIT_ALPHA2_NOT_FOUND: 5859 reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; 5860 break; 5861 case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED: 5862 reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; 5863 break; 5864 case WMI_REG_SET_CC_STATUS_NO_MEMORY: 5865 reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; 5866 break; 5867 case WMI_REG_SET_CC_STATUS_FAIL: 5868 reg_info->status_code = REG_SET_CC_STATUS_FAIL; 5869 break; 5870 } 5871 5872 reg_info->is_ext_reg_event = true; 5873 5874 reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g); 5875 reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g); 5876 reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g); 5877 reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g); 5878 reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi); 5879 reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi); 5880 reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp); 5881 reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp); 5882 reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp); 5883 reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp); 5884 5885 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5886 reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5887 le32_to_cpu(ev->min_bw_6g_client_lpi[i]); 5888 reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5889 le32_to_cpu(ev->max_bw_6g_client_lpi[i]); 5890 reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5891 le32_to_cpu(ev->min_bw_6g_client_sp[i]); 5892 reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5893 le32_to_cpu(ev->max_bw_6g_client_sp[i]); 5894 reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] = 5895 le32_to_cpu(ev->min_bw_6g_client_vlp[i]); 5896 reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] = 5897 le32_to_cpu(ev->max_bw_6g_client_vlp[i]); 5898 } 5899 5900 ath12k_dbg(ab, ATH12K_DBG_WMI, 5901 "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", 5902 __func__, reg_info->alpha2, reg_info->dfs_region, 5903 reg_info->min_bw_2g, reg_info->max_bw_2g, 5904 reg_info->min_bw_5g, reg_info->max_bw_5g, 5905 reg_info->phybitmap); 5906 5907 ath12k_dbg(ab, ATH12K_DBG_WMI, 5908 
"num_2g_reg_rules %d num_5g_reg_rules %d", 5909 num_2g_reg_rules, num_5g_reg_rules); 5910 5911 ath12k_dbg(ab, ATH12K_DBG_WMI, 5912 "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d", 5913 num_6g_reg_rules_ap[WMI_REG_INDOOR_AP], 5914 num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP], 5915 num_6g_reg_rules_ap[WMI_REG_VLP_AP]); 5916 5917 ath12k_dbg(ab, ATH12K_DBG_WMI, 5918 "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 5919 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT], 5920 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT], 5921 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]); 5922 5923 ath12k_dbg(ab, ATH12K_DBG_WMI, 5924 "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 5925 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT], 5926 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT], 5927 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]); 5928 5929 ext_wmi_reg_rule = 5930 (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev 5931 + sizeof(*ev) 5932 + sizeof(struct wmi_tlv)); 5933 5934 if (num_2g_reg_rules) { 5935 reg_info->reg_rules_2g_ptr = 5936 create_ext_reg_rules_from_wmi(num_2g_reg_rules, 5937 ext_wmi_reg_rule); 5938 5939 if (!reg_info->reg_rules_2g_ptr) { 5940 kfree(tb); 5941 ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n"); 5942 return -ENOMEM; 5943 } 5944 } 5945 5946 ext_wmi_reg_rule += num_2g_reg_rules; 5947 5948 /* Firmware might include 6 GHz reg rule in 5 GHz rule list 5949 * for few countries along with separate 6 GHz rule. 5950 * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list 5951 * causes intersect check to be true, and same rules will be 5952 * shown multiple times in iw cmd. 5953 * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list 5954 */ 5955 num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule, 5956 num_5g_reg_rules); 5957 5958 if (num_invalid_5ghz_ext_rules) { 5959 ath12k_dbg(ab, ATH12K_DBG_WMI, 5960 "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules", 5961 reg_info->alpha2, reg_info->num_5g_reg_rules, 5962 num_invalid_5ghz_ext_rules); 5963 5964 num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules; 5965 reg_info->num_5g_reg_rules = num_5g_reg_rules; 5966 } 5967 5968 if (num_5g_reg_rules) { 5969 reg_info->reg_rules_5g_ptr = 5970 create_ext_reg_rules_from_wmi(num_5g_reg_rules, 5971 ext_wmi_reg_rule); 5972 5973 if (!reg_info->reg_rules_5g_ptr) { 5974 kfree(tb); 5975 ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n"); 5976 return -ENOMEM; 5977 } 5978 } 5979 5980 /* We have adjusted the number of 5 GHz reg rules above. But still those 5981 * many rules needs to be adjusted in ext_wmi_reg_rule. 5982 * 5983 * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases. 
5984 */ 5985 ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules); 5986 5987 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 5988 reg_info->reg_rules_6g_ap_ptr[i] = 5989 create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i], 5990 ext_wmi_reg_rule); 5991 5992 if (!reg_info->reg_rules_6g_ap_ptr[i]) { 5993 kfree(tb); 5994 ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n"); 5995 return -ENOMEM; 5996 } 5997 5998 ext_wmi_reg_rule += num_6g_reg_rules_ap[i]; 5999 } 6000 6001 for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { 6002 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 6003 reg_info->reg_rules_6g_client_ptr[j][i] = 6004 create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i], 6005 ext_wmi_reg_rule); 6006 6007 if (!reg_info->reg_rules_6g_client_ptr[j][i]) { 6008 kfree(tb); 6009 ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n"); 6010 return -ENOMEM; 6011 } 6012 6013 ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i]; 6014 } 6015 } 6016 6017 reg_info->client_type = le32_to_cpu(ev->client_type); 6018 reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; 6019 reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; 6020 reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] = 6021 le32_to_cpu(ev->domain_code_6g_ap_lpi); 6022 reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] = 6023 le32_to_cpu(ev->domain_code_6g_ap_sp); 6024 reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] = 6025 le32_to_cpu(ev->domain_code_6g_ap_vlp); 6026 6027 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 6028 reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] = 6029 le32_to_cpu(ev->domain_code_6g_client_lpi[i]); 6030 reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] = 6031 le32_to_cpu(ev->domain_code_6g_client_sp[i]); 6032 reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] = 6033 le32_to_cpu(ev->domain_code_6g_client_vlp[i]); 6034 } 6035 6036 reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id); 6037 6038 ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d", 6039 reg_info->client_type, reg_info->domain_code_6g_super_id); 6040 6041 ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n"); 6042 6043 kfree(tb); 6044 return 0; 6045 } 6046 6047 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb, 6048 struct wmi_peer_delete_resp_event *peer_del_resp) 6049 { 6050 const void **tb; 6051 const struct wmi_peer_delete_resp_event *ev; 6052 int ret; 6053 6054 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6055 if (IS_ERR(tb)) { 6056 ret = PTR_ERR(tb); 6057 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6058 return ret; 6059 } 6060 6061 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; 6062 if (!ev) { 6063 ath12k_warn(ab, "failed to fetch peer delete resp ev"); 6064 kfree(tb); 6065 return -EPROTO; 6066 } 6067 6068 memset(peer_del_resp, 0, sizeof(*peer_del_resp)); 6069 6070 peer_del_resp->vdev_id = ev->vdev_id; 6071 ether_addr_copy(peer_del_resp->peer_macaddr.addr, 6072 ev->peer_macaddr.addr); 6073 6074 kfree(tb); 6075 return 0; 6076 } 6077 6078 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, 6079 struct sk_buff *skb, 6080 u32 *vdev_id) 6081 { 6082 const void **tb; 6083 const struct wmi_vdev_delete_resp_event *ev; 6084 int ret; 6085 6086 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6087 if (IS_ERR(tb)) { 6088 ret = PTR_ERR(tb); 6089 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6090 return ret; 6091 } 6092 6093 ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; 6094 if (!ev) { 6095 
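		/* The fixed-parameter TLV is mandatory for this event; a
		 * missing tag means the message is malformed, so report a
		 * protocol error rather than a parse failure.
		 */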
ath12k_warn(ab, "failed to fetch vdev delete resp ev"); 6096 kfree(tb); 6097 return -EPROTO; 6098 } 6099 6100 *vdev_id = le32_to_cpu(ev->vdev_id); 6101 6102 kfree(tb); 6103 return 0; 6104 } 6105 6106 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, 6107 struct sk_buff *skb, 6108 u32 *vdev_id, u32 *tx_status) 6109 { 6110 const void **tb; 6111 const struct wmi_bcn_tx_status_event *ev; 6112 int ret; 6113 6114 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6115 if (IS_ERR(tb)) { 6116 ret = PTR_ERR(tb); 6117 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6118 return ret; 6119 } 6120 6121 ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; 6122 if (!ev) { 6123 ath12k_warn(ab, "failed to fetch bcn tx status ev"); 6124 kfree(tb); 6125 return -EPROTO; 6126 } 6127 6128 *vdev_id = le32_to_cpu(ev->vdev_id); 6129 *tx_status = le32_to_cpu(ev->tx_status); 6130 6131 kfree(tb); 6132 return 0; 6133 } 6134 6135 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb, 6136 u32 *vdev_id) 6137 { 6138 const void **tb; 6139 const struct wmi_vdev_stopped_event *ev; 6140 int ret; 6141 6142 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6143 if (IS_ERR(tb)) { 6144 ret = PTR_ERR(tb); 6145 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6146 return ret; 6147 } 6148 6149 ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; 6150 if (!ev) { 6151 ath12k_warn(ab, "failed to fetch vdev stop ev"); 6152 kfree(tb); 6153 return -EPROTO; 6154 } 6155 6156 *vdev_id = le32_to_cpu(ev->vdev_id); 6157 6158 kfree(tb); 6159 return 0; 6160 } 6161 6162 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab, 6163 u16 tag, u16 len, 6164 const void *ptr, void *data) 6165 { 6166 struct wmi_tlv_mgmt_rx_parse *parse = data; 6167 6168 switch (tag) { 6169 case WMI_TAG_MGMT_RX_HDR: 6170 parse->fixed = ptr; 6171 break; 6172 case WMI_TAG_ARRAY_BYTE: 6173 if (!parse->frame_buf_done) { 6174 parse->frame_buf = ptr; 6175 parse->frame_buf_done = true; 6176 } 6177 break; 6178 } 6179 return 0; 6180 } 6181 6182 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab, 6183 struct sk_buff *skb, 6184 struct ath12k_wmi_mgmt_rx_arg *hdr) 6185 { 6186 struct wmi_tlv_mgmt_rx_parse parse = { }; 6187 const struct ath12k_wmi_mgmt_rx_params *ev; 6188 const u8 *frame; 6189 int i, ret; 6190 6191 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6192 ath12k_wmi_tlv_mgmt_rx_parse, 6193 &parse); 6194 if (ret) { 6195 ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret); 6196 return ret; 6197 } 6198 6199 ev = parse.fixed; 6200 frame = parse.frame_buf; 6201 6202 if (!ev || !frame) { 6203 ath12k_warn(ab, "failed to fetch mgmt rx hdr"); 6204 return -EPROTO; 6205 } 6206 6207 hdr->pdev_id = le32_to_cpu(ev->pdev_id); 6208 hdr->chan_freq = le32_to_cpu(ev->chan_freq); 6209 hdr->channel = le32_to_cpu(ev->channel); 6210 hdr->snr = le32_to_cpu(ev->snr); 6211 hdr->rate = le32_to_cpu(ev->rate); 6212 hdr->phy_mode = le32_to_cpu(ev->phy_mode); 6213 hdr->buf_len = le32_to_cpu(ev->buf_len); 6214 hdr->status = le32_to_cpu(ev->status); 6215 hdr->flags = le32_to_cpu(ev->flags); 6216 hdr->rssi = a_sle32_to_cpu(ev->rssi); 6217 hdr->tsf_delta = le32_to_cpu(ev->tsf_delta); 6218 6219 for (i = 0; i < ATH_MAX_ANTENNA; i++) 6220 hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]); 6221 6222 if (skb->len < (frame - skb->data) + hdr->buf_len) { 6223 ath12k_warn(ab, "invalid length in mgmt rx hdr ev"); 6224 return -EPROTO; 6225 } 6226 6227 /* shift the sk_buff to point to `frame` */ 6228 skb_trim(skb, 0); 6229 skb_put(skb, frame - 
skb->data); 6230 skb_pull(skb, frame - skb->data); 6231 skb_put(skb, hdr->buf_len); 6232 6233 return 0; 6234 } 6235 6236 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, 6237 u32 status, u32 ack_rssi) 6238 { 6239 struct sk_buff *msdu; 6240 struct ieee80211_tx_info *info; 6241 struct ath12k_skb_cb *skb_cb; 6242 int num_mgmt; 6243 6244 spin_lock_bh(&ar->txmgmt_idr_lock); 6245 msdu = idr_find(&ar->txmgmt_idr, desc_id); 6246 6247 if (!msdu) { 6248 ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", 6249 desc_id); 6250 spin_unlock_bh(&ar->txmgmt_idr_lock); 6251 return -ENOENT; 6252 } 6253 6254 idr_remove(&ar->txmgmt_idr, desc_id); 6255 spin_unlock_bh(&ar->txmgmt_idr_lock); 6256 6257 skb_cb = ATH12K_SKB_CB(msdu); 6258 dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 6259 6260 info = IEEE80211_SKB_CB(msdu); 6261 memset(&info->status, 0, sizeof(info->status)); 6262 6263 /* skip tx rate update from ieee80211_status*/ 6264 info->status.rates[0].idx = -1; 6265 6266 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) { 6267 info->flags |= IEEE80211_TX_STAT_ACK; 6268 info->status.ack_signal = ack_rssi; 6269 info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; 6270 } 6271 6272 if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status) 6273 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 6274 6275 ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); 6276 6277 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 6278 6279 /* WARN when we received this event without doing any mgmt tx */ 6280 if (num_mgmt < 0) 6281 WARN_ON_ONCE(1); 6282 6283 if (!num_mgmt) 6284 wake_up(&ar->txmgmt_empty_waitq); 6285 6286 return 0; 6287 } 6288 6289 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, 6290 struct sk_buff *skb, 6291 struct wmi_mgmt_tx_compl_event *param) 6292 { 6293 const void **tb; 6294 const struct wmi_mgmt_tx_compl_event *ev; 6295 int ret; 6296 6297 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6298 if (IS_ERR(tb)) { 6299 ret = PTR_ERR(tb); 6300 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6301 return ret; 6302 } 6303 6304 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; 6305 if (!ev) { 6306 ath12k_warn(ab, "failed to fetch mgmt tx compl ev"); 6307 kfree(tb); 6308 return -EPROTO; 6309 } 6310 6311 param->pdev_id = ev->pdev_id; 6312 param->desc_id = ev->desc_id; 6313 param->status = ev->status; 6314 param->ppdu_id = ev->ppdu_id; 6315 param->ack_rssi = ev->ack_rssi; 6316 6317 kfree(tb); 6318 return 0; 6319 } 6320 6321 static void ath12k_wmi_event_scan_started(struct ath12k *ar) 6322 { 6323 lockdep_assert_held(&ar->data_lock); 6324 6325 switch (ar->scan.state) { 6326 case ATH12K_SCAN_IDLE: 6327 case ATH12K_SCAN_RUNNING: 6328 case ATH12K_SCAN_ABORTING: 6329 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", 6330 ath12k_scan_state_str(ar->scan.state), 6331 ar->scan.state); 6332 break; 6333 case ATH12K_SCAN_STARTING: 6334 ar->scan.state = ATH12K_SCAN_RUNNING; 6335 6336 if (ar->scan.is_roc) 6337 ieee80211_ready_on_channel(ath12k_ar_to_hw(ar)); 6338 6339 complete(&ar->scan.started); 6340 break; 6341 } 6342 } 6343 6344 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar) 6345 { 6346 lockdep_assert_held(&ar->data_lock); 6347 6348 switch (ar->scan.state) { 6349 case ATH12K_SCAN_IDLE: 6350 case ATH12K_SCAN_RUNNING: 6351 case ATH12K_SCAN_ABORTING: 6352 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", 6353 
ath12k_scan_state_str(ar->scan.state), 6354 ar->scan.state); 6355 break; 6356 case ATH12K_SCAN_STARTING: 6357 complete(&ar->scan.started); 6358 __ath12k_mac_scan_finish(ar); 6359 break; 6360 } 6361 } 6362 6363 static void ath12k_wmi_event_scan_completed(struct ath12k *ar) 6364 { 6365 lockdep_assert_held(&ar->data_lock); 6366 6367 switch (ar->scan.state) { 6368 case ATH12K_SCAN_IDLE: 6369 case ATH12K_SCAN_STARTING: 6370 /* One suspected reason scan can be completed while starting is 6371 * if firmware fails to deliver all scan events to the host, 6372 * e.g. when transport pipe is full. This has been observed 6373 * with spectral scan phyerr events starving wmi transport 6374 * pipe. In such case the "scan completed" event should be (and 6375 * is) ignored by the host as it may be just firmware's scan 6376 * state machine recovering. 6377 */ 6378 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", 6379 ath12k_scan_state_str(ar->scan.state), 6380 ar->scan.state); 6381 break; 6382 case ATH12K_SCAN_RUNNING: 6383 case ATH12K_SCAN_ABORTING: 6384 __ath12k_mac_scan_finish(ar); 6385 break; 6386 } 6387 } 6388 6389 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) 6390 { 6391 lockdep_assert_held(&ar->data_lock); 6392 6393 switch (ar->scan.state) { 6394 case ATH12K_SCAN_IDLE: 6395 case ATH12K_SCAN_STARTING: 6396 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", 6397 ath12k_scan_state_str(ar->scan.state), 6398 ar->scan.state); 6399 break; 6400 case ATH12K_SCAN_RUNNING: 6401 case ATH12K_SCAN_ABORTING: 6402 ar->scan_channel = NULL; 6403 break; 6404 } 6405 } 6406 6407 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) 6408 { 6409 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6410 6411 lockdep_assert_held(&ar->data_lock); 6412 6413 switch (ar->scan.state) { 6414 case ATH12K_SCAN_IDLE: 6415 case ATH12K_SCAN_STARTING: 6416 ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", 6417 ath12k_scan_state_str(ar->scan.state), 6418 ar->scan.state); 6419 break; 6420 case ATH12K_SCAN_RUNNING: 6421 case ATH12K_SCAN_ABORTING: 6422 ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq); 6423 6424 if (ar->scan.is_roc && ar->scan.roc_freq == freq) 6425 complete(&ar->scan.on_channel); 6426 6427 break; 6428 } 6429 } 6430 6431 static const char * 6432 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type, 6433 enum wmi_scan_completion_reason reason) 6434 { 6435 switch (type) { 6436 case WMI_SCAN_EVENT_STARTED: 6437 return "started"; 6438 case WMI_SCAN_EVENT_COMPLETED: 6439 switch (reason) { 6440 case WMI_SCAN_REASON_COMPLETED: 6441 return "completed"; 6442 case WMI_SCAN_REASON_CANCELLED: 6443 return "completed [cancelled]"; 6444 case WMI_SCAN_REASON_PREEMPTED: 6445 return "completed [preempted]"; 6446 case WMI_SCAN_REASON_TIMEDOUT: 6447 return "completed [timedout]"; 6448 case WMI_SCAN_REASON_INTERNAL_FAILURE: 6449 return "completed [internal err]"; 6450 case WMI_SCAN_REASON_MAX: 6451 break; 6452 } 6453 return "completed [unknown]"; 6454 case WMI_SCAN_EVENT_BSS_CHANNEL: 6455 return "bss channel"; 6456 case WMI_SCAN_EVENT_FOREIGN_CHAN: 6457 return "foreign channel"; 6458 case WMI_SCAN_EVENT_DEQUEUED: 6459 return "dequeued"; 6460 case WMI_SCAN_EVENT_PREEMPTED: 6461 return "preempted"; 6462 case WMI_SCAN_EVENT_START_FAILED: 6463 return "start failed"; 6464 case WMI_SCAN_EVENT_RESTARTED: 6465 return "restarted"; 6466 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 6467 return 
"foreign channel exit"; 6468 default: 6469 return "unknown"; 6470 } 6471 } 6472 6473 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, 6474 struct wmi_scan_event *scan_evt_param) 6475 { 6476 const void **tb; 6477 const struct wmi_scan_event *ev; 6478 int ret; 6479 6480 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6481 if (IS_ERR(tb)) { 6482 ret = PTR_ERR(tb); 6483 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6484 return ret; 6485 } 6486 6487 ev = tb[WMI_TAG_SCAN_EVENT]; 6488 if (!ev) { 6489 ath12k_warn(ab, "failed to fetch scan ev"); 6490 kfree(tb); 6491 return -EPROTO; 6492 } 6493 6494 scan_evt_param->event_type = ev->event_type; 6495 scan_evt_param->reason = ev->reason; 6496 scan_evt_param->channel_freq = ev->channel_freq; 6497 scan_evt_param->scan_req_id = ev->scan_req_id; 6498 scan_evt_param->scan_id = ev->scan_id; 6499 scan_evt_param->vdev_id = ev->vdev_id; 6500 scan_evt_param->tsf_timestamp = ev->tsf_timestamp; 6501 6502 kfree(tb); 6503 return 0; 6504 } 6505 6506 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb, 6507 struct wmi_peer_sta_kickout_arg *arg) 6508 { 6509 const void **tb; 6510 const struct wmi_peer_sta_kickout_event *ev; 6511 int ret; 6512 6513 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6514 if (IS_ERR(tb)) { 6515 ret = PTR_ERR(tb); 6516 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6517 return ret; 6518 } 6519 6520 ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; 6521 if (!ev) { 6522 ath12k_warn(ab, "failed to fetch peer sta kickout ev"); 6523 kfree(tb); 6524 return -EPROTO; 6525 } 6526 6527 arg->mac_addr = ev->peer_macaddr.addr; 6528 arg->reason = le32_to_cpu(ev->reason); 6529 arg->rssi = le32_to_cpu(ev->rssi); 6530 6531 kfree(tb); 6532 return 0; 6533 } 6534 6535 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, 6536 struct wmi_roam_event *roam_ev) 6537 { 6538 const void **tb; 6539 const struct wmi_roam_event *ev; 6540 int ret; 6541 6542 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6543 if (IS_ERR(tb)) { 6544 ret = PTR_ERR(tb); 6545 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6546 return ret; 6547 } 6548 6549 ev = tb[WMI_TAG_ROAM_EVENT]; 6550 if (!ev) { 6551 ath12k_warn(ab, "failed to fetch roam ev"); 6552 kfree(tb); 6553 return -EPROTO; 6554 } 6555 6556 roam_ev->vdev_id = ev->vdev_id; 6557 roam_ev->reason = ev->reason; 6558 roam_ev->rssi = ev->rssi; 6559 6560 kfree(tb); 6561 return 0; 6562 } 6563 6564 static int freq_to_idx(struct ath12k *ar, int freq) 6565 { 6566 struct ieee80211_supported_band *sband; 6567 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6568 int band, ch, idx = 0; 6569 6570 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6571 if (!ar->mac.sbands[band].channels) 6572 continue; 6573 6574 sband = hw->wiphy->bands[band]; 6575 if (!sband) 6576 continue; 6577 6578 for (ch = 0; ch < sband->n_channels; ch++, idx++) { 6579 if (sband->channels[ch].center_freq < 6580 KHZ_TO_MHZ(ar->freq_range.start_freq) || 6581 sband->channels[ch].center_freq > 6582 KHZ_TO_MHZ(ar->freq_range.end_freq)) 6583 continue; 6584 6585 if (sband->channels[ch].center_freq == freq) 6586 goto exit; 6587 } 6588 } 6589 6590 exit: 6591 return idx; 6592 } 6593 6594 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6595 struct wmi_chan_info_event *ch_info_ev) 6596 { 6597 const void **tb; 6598 const struct wmi_chan_info_event *ev; 6599 int ret; 6600 6601 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6602 if 
(IS_ERR(tb)) { 6603 ret = PTR_ERR(tb); 6604 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6605 return ret; 6606 } 6607 6608 ev = tb[WMI_TAG_CHAN_INFO_EVENT]; 6609 if (!ev) { 6610 ath12k_warn(ab, "failed to fetch chan info ev"); 6611 kfree(tb); 6612 return -EPROTO; 6613 } 6614 6615 ch_info_ev->err_code = ev->err_code; 6616 ch_info_ev->freq = ev->freq; 6617 ch_info_ev->cmd_flags = ev->cmd_flags; 6618 ch_info_ev->noise_floor = ev->noise_floor; 6619 ch_info_ev->rx_clear_count = ev->rx_clear_count; 6620 ch_info_ev->cycle_count = ev->cycle_count; 6621 ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; 6622 ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; 6623 ch_info_ev->rx_frame_count = ev->rx_frame_count; 6624 ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; 6625 ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; 6626 ch_info_ev->vdev_id = ev->vdev_id; 6627 6628 kfree(tb); 6629 return 0; 6630 } 6631 6632 static int 6633 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6634 struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) 6635 { 6636 const void **tb; 6637 const struct wmi_pdev_bss_chan_info_event *ev; 6638 int ret; 6639 6640 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6641 if (IS_ERR(tb)) { 6642 ret = PTR_ERR(tb); 6643 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6644 return ret; 6645 } 6646 6647 ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; 6648 if (!ev) { 6649 ath12k_warn(ab, "failed to fetch pdev bss chan info ev"); 6650 kfree(tb); 6651 return -EPROTO; 6652 } 6653 6654 bss_ch_info_ev->pdev_id = ev->pdev_id; 6655 bss_ch_info_ev->freq = ev->freq; 6656 bss_ch_info_ev->noise_floor = ev->noise_floor; 6657 bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; 6658 bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; 6659 bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; 6660 bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; 6661 bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; 6662 bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; 6663 bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; 6664 bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; 6665 bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; 6666 bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; 6667 6668 kfree(tb); 6669 return 0; 6670 } 6671 6672 static int 6673 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb, 6674 struct wmi_vdev_install_key_complete_arg *arg) 6675 { 6676 const void **tb; 6677 const struct wmi_vdev_install_key_compl_event *ev; 6678 int ret; 6679 6680 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6681 if (IS_ERR(tb)) { 6682 ret = PTR_ERR(tb); 6683 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6684 return ret; 6685 } 6686 6687 ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; 6688 if (!ev) { 6689 ath12k_warn(ab, "failed to fetch vdev install key compl ev"); 6690 kfree(tb); 6691 return -EPROTO; 6692 } 6693 6694 arg->vdev_id = le32_to_cpu(ev->vdev_id); 6695 arg->macaddr = ev->peer_macaddr.addr; 6696 arg->key_idx = le32_to_cpu(ev->key_idx); 6697 arg->key_flags = le32_to_cpu(ev->key_flags); 6698 arg->status = le32_to_cpu(ev->status); 6699 6700 kfree(tb); 6701 return 0; 6702 } 6703 6704 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb, 6705 struct wmi_peer_assoc_conf_arg *peer_assoc_conf) 6706 { 6707 const void **tb; 6708 const struct wmi_peer_assoc_conf_event *ev; 6709 int ret; 6710 6711 tb = 
ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -EPROTO;
	}

	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

static int
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
			 const struct wmi_pdev_temperature_event *ev)
{
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return -EPROTO;
	}

	kfree(tb);
	return 0;
}

static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
{
	/* try to send pending beacons first; they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const struct wmi_11d_new_cc_event *ev;
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
	if (!ev) {
		kfree(tb);
		ath12k_warn(ab, "failed to fetch 11d new cc ev");
		return -EPROTO;
	}

	spin_lock_bh(&ab->base_lock);
	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
		   ab->new_alpha2[0],
		   ab->new_alpha2[1]);

	kfree(tb);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;
		ar->state_11d = ATH12K_11D_IDLE;
		ar->ah->regd_updated = false;
		complete(&ar->completed_11d_scan);
	}

	queue_work(ab->workqueue, &ab->update_11d_work);

	return 0;
}

static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_reg_info *reg_info;
	struct ath12k *ar = NULL;
	u8 pdev_idx = 255;
	int ret;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
	if (ret) {
		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
		goto mem_free;
	}

	ret = ath12k_reg_validate_reg_info(ab, reg_info);
	if (ret == ATH12K_REG_STATUS_FALLBACK) {
		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
		/* firmware has successfully switched to the new regd but the
		 * host cannot continue, so free reg_info and fall back to the
		 * old regd
		 */
		goto mem_free;
	} else if (ret == ATH12K_REG_STATUS_DROP) {
		/* reg info is valid but we will neither store it nor create
		 * a new regd for it
		 */
		ret = ATH12K_REG_STATUS_VALID;
		goto mem_free;
	}

	/* free the old reg_info if it exists */
	pdev_idx = reg_info->phy_id;
	if (ab->reg_info[pdev_idx]) {
		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
		kfree(ab->reg_info[pdev_idx]);
	}
	/* reg_info is valid; store it for later use even if the regd build
	 * below fails
	 */
	ab->reg_info[pdev_idx] = reg_info;

	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
					  IEEE80211_REG_UNSET_AP);
	if (ret) {
		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
		goto fallback;
	}

	goto out;

mem_free:
	ath12k_reg_reset_reg_info(reg_info);
	kfree(reg_info);

	if (ret == ATH12K_REG_STATUS_VALID)
		goto out;

fallback:
	/* Fall back to the older regd (by sending the previous country
	 * setting again) if fw has succeeded and we failed to process here.
	 * The regdomain should be uniform across driver and fw. Since the
	 * fw has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);

out:
	/* In some error cases, even a valid pdev_idx might not be available */
	if (pdev_idx != 255)
		ar = ab->pdevs[pdev_idx].ar;

	/* During the boot-time update, 'ar' might not be allocated,
	 * so the completion cannot be marked at that point.
	 * This boot-time update is handled in ath12k_mac_hw_register()
	 * before registering the hardware.
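	 *
	 * For runtime updates 'ar' is expected to be valid, and completing
	 * regd_update_completed here unblocks any waiter that holds off on
	 * channel configuration until the new regd has been processed.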
6895 */ 6896 if (ar) 6897 complete_all(&ar->regd_update_completed); 6898 6899 return ret; 6900 } 6901 6902 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len, 6903 const void *ptr, void *data) 6904 { 6905 struct ath12k_wmi_rdy_parse *rdy_parse = data; 6906 struct wmi_ready_event fixed_param; 6907 struct ath12k_wmi_mac_addr_params *addr_list; 6908 struct ath12k_pdev *pdev; 6909 u32 num_mac_addr; 6910 int i; 6911 6912 switch (tag) { 6913 case WMI_TAG_READY_EVENT: 6914 memset(&fixed_param, 0, sizeof(fixed_param)); 6915 memcpy(&fixed_param, (struct wmi_ready_event *)ptr, 6916 min_t(u16, sizeof(fixed_param), len)); 6917 ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status); 6918 rdy_parse->num_extra_mac_addr = 6919 le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr); 6920 6921 ether_addr_copy(ab->mac_addr, 6922 fixed_param.ready_event_min.mac_addr.addr); 6923 ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum); 6924 ab->wmi_ready = true; 6925 break; 6926 case WMI_TAG_ARRAY_FIXED_STRUCT: 6927 addr_list = (struct ath12k_wmi_mac_addr_params *)ptr; 6928 num_mac_addr = rdy_parse->num_extra_mac_addr; 6929 6930 if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) 6931 break; 6932 6933 for (i = 0; i < ab->num_radios; i++) { 6934 pdev = &ab->pdevs[i]; 6935 ether_addr_copy(pdev->mac_addr, addr_list[i].addr); 6936 } 6937 ab->pdevs_macaddr_valid = true; 6938 break; 6939 default: 6940 break; 6941 } 6942 6943 return 0; 6944 } 6945 6946 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb) 6947 { 6948 struct ath12k_wmi_rdy_parse rdy_parse = { }; 6949 int ret; 6950 6951 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6952 ath12k_wmi_rdy_parse, &rdy_parse); 6953 if (ret) { 6954 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 6955 return ret; 6956 } 6957 6958 complete(&ab->wmi_ab.unified_ready); 6959 return 0; 6960 } 6961 6962 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 6963 { 6964 struct wmi_peer_delete_resp_event peer_del_resp; 6965 struct ath12k *ar; 6966 6967 if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { 6968 ath12k_warn(ab, "failed to extract peer delete resp"); 6969 return; 6970 } 6971 6972 rcu_read_lock(); 6973 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id)); 6974 if (!ar) { 6975 ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d", 6976 peer_del_resp.vdev_id); 6977 rcu_read_unlock(); 6978 return; 6979 } 6980 6981 complete(&ar->peer_delete_done); 6982 rcu_read_unlock(); 6983 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", 6984 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); 6985 } 6986 6987 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab, 6988 struct sk_buff *skb) 6989 { 6990 struct ath12k *ar; 6991 u32 vdev_id = 0; 6992 6993 if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { 6994 ath12k_warn(ab, "failed to extract vdev delete resp"); 6995 return; 6996 } 6997 6998 rcu_read_lock(); 6999 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7000 if (!ar) { 7001 ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d", 7002 vdev_id); 7003 rcu_read_unlock(); 7004 return; 7005 } 7006 7007 complete(&ar->vdev_delete_done); 7008 7009 rcu_read_unlock(); 7010 7011 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n", 7012 vdev_id); 7013 } 7014 7015 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status) 7016 { 7017 switch 
(vdev_resp_status) { 7018 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: 7019 return "invalid vdev id"; 7020 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: 7021 return "not supported"; 7022 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: 7023 return "dfs violation"; 7024 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: 7025 return "invalid regdomain"; 7026 default: 7027 return "unknown"; 7028 } 7029 } 7030 7031 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 7032 { 7033 struct wmi_vdev_start_resp_event vdev_start_resp; 7034 struct ath12k *ar; 7035 u32 status; 7036 7037 if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { 7038 ath12k_warn(ab, "failed to extract vdev start resp"); 7039 return; 7040 } 7041 7042 rcu_read_lock(); 7043 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id)); 7044 if (!ar) { 7045 ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d", 7046 vdev_start_resp.vdev_id); 7047 rcu_read_unlock(); 7048 return; 7049 } 7050 7051 ar->last_wmi_vdev_start_status = 0; 7052 7053 status = le32_to_cpu(vdev_start_resp.status); 7054 if (WARN_ON_ONCE(status)) { 7055 ath12k_warn(ab, "vdev start resp error status %d (%s)\n", 7056 status, ath12k_wmi_vdev_resp_print(status)); 7057 ar->last_wmi_vdev_start_status = status; 7058 } 7059 7060 ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power); 7061 7062 complete(&ar->vdev_setup_done); 7063 7064 rcu_read_unlock(); 7065 7066 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d", 7067 vdev_start_resp.vdev_id); 7068 } 7069 7070 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) 7071 { 7072 struct ath12k_link_vif *arvif; 7073 struct ath12k *ar; 7074 u32 vdev_id, tx_status; 7075 7076 if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 7077 ath12k_warn(ab, "failed to extract bcn tx status"); 7078 return; 7079 } 7080 7081 guard(rcu)(); 7082 7083 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7084 if (!arvif) { 7085 ath12k_warn(ab, "invalid vdev %u in bcn tx status\n", 7086 vdev_id); 7087 return; 7088 } 7089 7090 ar = arvif->ar; 7091 wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work); 7092 } 7093 7094 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) 7095 { 7096 struct ath12k *ar; 7097 u32 vdev_id = 0; 7098 7099 if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { 7100 ath12k_warn(ab, "failed to extract vdev stopped event"); 7101 return; 7102 } 7103 7104 rcu_read_lock(); 7105 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7106 if (!ar) { 7107 ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d", 7108 vdev_id); 7109 rcu_read_unlock(); 7110 return; 7111 } 7112 7113 complete(&ar->vdev_setup_done); 7114 7115 rcu_read_unlock(); 7116 7117 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); 7118 } 7119 7120 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) 7121 { 7122 struct ath12k_wmi_mgmt_rx_arg rx_ev = {}; 7123 struct ath12k *ar; 7124 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 7125 struct ieee80211_hdr *hdr; 7126 u16 fc; 7127 struct ieee80211_supported_band *sband; 7128 s32 noise_floor; 7129 7130 if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { 7131 ath12k_warn(ab, "failed to extract mgmt rx event"); 7132 dev_kfree_skb(skb); 7133 return; 7134 } 7135 7136 memset(status, 0, sizeof(*status)); 7137 7138 ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event 
status %08x\n", 7139 rx_ev.status); 7140 7141 rcu_read_lock(); 7142 ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); 7143 7144 if (!ar) { 7145 ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", 7146 rx_ev.pdev_id); 7147 dev_kfree_skb(skb); 7148 goto exit; 7149 } 7150 7151 if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) || 7152 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 7153 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | 7154 WMI_RX_STATUS_ERR_CRC))) { 7155 dev_kfree_skb(skb); 7156 goto exit; 7157 } 7158 7159 if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) 7160 status->flag |= RX_FLAG_MMIC_ERROR; 7161 7162 if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ && 7163 rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) { 7164 status->band = NL80211_BAND_6GHZ; 7165 status->freq = rx_ev.chan_freq; 7166 } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { 7167 status->band = NL80211_BAND_2GHZ; 7168 } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) { 7169 status->band = NL80211_BAND_5GHZ; 7170 } else { 7171 /* Shouldn't happen unless list of advertised channels to 7172 * mac80211 has been changed. 7173 */ 7174 WARN_ON_ONCE(1); 7175 dev_kfree_skb(skb); 7176 goto exit; 7177 } 7178 7179 if (rx_ev.phy_mode == MODE_11B && 7180 (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) 7181 ath12k_dbg(ab, ATH12K_DBG_WMI, 7182 "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); 7183 7184 sband = &ar->mac.sbands[status->band]; 7185 7186 if (status->band != NL80211_BAND_6GHZ) 7187 status->freq = ieee80211_channel_to_frequency(rx_ev.channel, 7188 status->band); 7189 7190 spin_lock_bh(&ar->data_lock); 7191 noise_floor = ath12k_pdev_get_noise_floor(ar); 7192 spin_unlock_bh(&ar->data_lock); 7193 7194 status->signal = rx_ev.snr + noise_floor; 7195 status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); 7196 7197 hdr = (struct ieee80211_hdr *)skb->data; 7198 fc = le16_to_cpu(hdr->frame_control); 7199 7200 /* Firmware is guaranteed to report all essential management frames via 7201 * WMI while it can deliver some extra via HTT. Since there can be 7202 * duplicates split the reporting wrt monitor/sniffing. 7203 */ 7204 status->flag |= RX_FLAG_SKIP_MONITOR; 7205 7206 /* In case of PMF, FW delivers decrypted frames with Protected Bit set 7207 * including group privacy action frames. 
7208 */ 7209 if (ieee80211_has_protected(hdr->frame_control)) { 7210 status->flag |= RX_FLAG_DECRYPTED; 7211 7212 if (!ieee80211_is_robust_mgmt_frame(skb)) { 7213 status->flag |= RX_FLAG_IV_STRIPPED | 7214 RX_FLAG_MMIC_STRIPPED; 7215 hdr->frame_control = __cpu_to_le16(fc & 7216 ~IEEE80211_FCTL_PROTECTED); 7217 } 7218 } 7219 7220 if (ieee80211_is_beacon(hdr->frame_control)) 7221 ath12k_mac_handle_beacon(ar, skb); 7222 7223 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7224 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 7225 skb, skb->len, 7226 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 7227 7228 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7229 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 7230 status->freq, status->band, status->signal, 7231 status->rate_idx); 7232 7233 ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb); 7234 7235 exit: 7236 rcu_read_unlock(); 7237 } 7238 7239 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb) 7240 { 7241 struct wmi_mgmt_tx_compl_event tx_compl_param = {}; 7242 struct ath12k *ar; 7243 7244 if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { 7245 ath12k_warn(ab, "failed to extract mgmt tx compl event"); 7246 return; 7247 } 7248 7249 rcu_read_lock(); 7250 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id)); 7251 if (!ar) { 7252 ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", 7253 tx_compl_param.pdev_id); 7254 goto exit; 7255 } 7256 7257 wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id), 7258 le32_to_cpu(tx_compl_param.status), 7259 le32_to_cpu(tx_compl_param.ack_rssi)); 7260 7261 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7262 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", 7263 tx_compl_param.pdev_id, tx_compl_param.desc_id, 7264 tx_compl_param.status); 7265 7266 exit: 7267 rcu_read_unlock(); 7268 } 7269 7270 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, 7271 u32 vdev_id, 7272 enum ath12k_scan_state state) 7273 { 7274 int i; 7275 struct ath12k_pdev *pdev; 7276 struct ath12k *ar; 7277 7278 for (i = 0; i < ab->num_radios; i++) { 7279 pdev = rcu_dereference(ab->pdevs_active[i]); 7280 if (pdev && pdev->ar) { 7281 ar = pdev->ar; 7282 7283 spin_lock_bh(&ar->data_lock); 7284 if (ar->scan.state == state && 7285 ar->scan.arvif && 7286 ar->scan.arvif->vdev_id == vdev_id) { 7287 spin_unlock_bh(&ar->data_lock); 7288 return ar; 7289 } 7290 spin_unlock_bh(&ar->data_lock); 7291 } 7292 } 7293 return NULL; 7294 } 7295 7296 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) 7297 { 7298 struct ath12k *ar; 7299 struct wmi_scan_event scan_ev = {}; 7300 7301 if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) { 7302 ath12k_warn(ab, "failed to extract scan event"); 7303 return; 7304 } 7305 7306 rcu_read_lock(); 7307 7308 /* In case the scan was cancelled, ex. during interface teardown, 7309 * the interface will not be found in active interfaces. 7310 * Rather, in such scenarios, iterate over the active pdev's to 7311 * search 'ar' if the corresponding 'ar' scan is ABORTING and the 7312 * aborting scan's vdev id matches this event info. 
7313 */ 7314 if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && 7315 le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) { 7316 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), 7317 ATH12K_SCAN_ABORTING); 7318 if (!ar) 7319 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), 7320 ATH12K_SCAN_RUNNING); 7321 } else { 7322 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); 7323 } 7324 7325 if (!ar) { 7326 ath12k_warn(ab, "Received scan event for unknown vdev"); 7327 rcu_read_unlock(); 7328 return; 7329 } 7330 7331 spin_lock_bh(&ar->data_lock); 7332 7333 ath12k_dbg(ab, ATH12K_DBG_WMI, 7334 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", 7335 ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type), 7336 le32_to_cpu(scan_ev.reason)), 7337 le32_to_cpu(scan_ev.event_type), 7338 le32_to_cpu(scan_ev.reason), 7339 le32_to_cpu(scan_ev.channel_freq), 7340 le32_to_cpu(scan_ev.scan_req_id), 7341 le32_to_cpu(scan_ev.scan_id), 7342 le32_to_cpu(scan_ev.vdev_id), 7343 ath12k_scan_state_str(ar->scan.state), ar->scan.state); 7344 7345 switch (le32_to_cpu(scan_ev.event_type)) { 7346 case WMI_SCAN_EVENT_STARTED: 7347 ath12k_wmi_event_scan_started(ar); 7348 break; 7349 case WMI_SCAN_EVENT_COMPLETED: 7350 ath12k_wmi_event_scan_completed(ar); 7351 break; 7352 case WMI_SCAN_EVENT_BSS_CHANNEL: 7353 ath12k_wmi_event_scan_bss_chan(ar); 7354 break; 7355 case WMI_SCAN_EVENT_FOREIGN_CHAN: 7356 ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq)); 7357 break; 7358 case WMI_SCAN_EVENT_START_FAILED: 7359 ath12k_warn(ab, "received scan start failure event\n"); 7360 ath12k_wmi_event_scan_start_failed(ar); 7361 break; 7362 case WMI_SCAN_EVENT_DEQUEUED: 7363 __ath12k_mac_scan_finish(ar); 7364 break; 7365 case WMI_SCAN_EVENT_PREEMPTED: 7366 case WMI_SCAN_EVENT_RESTARTED: 7367 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 7368 default: 7369 break; 7370 } 7371 7372 spin_unlock_bh(&ar->data_lock); 7373 7374 rcu_read_unlock(); 7375 } 7376 7377 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb) 7378 { 7379 struct wmi_peer_sta_kickout_arg arg = {}; 7380 struct ath12k_link_vif *arvif; 7381 struct ieee80211_sta *sta; 7382 struct ath12k_peer *peer; 7383 unsigned int link_id; 7384 struct ath12k *ar; 7385 7386 if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { 7387 ath12k_warn(ab, "failed to extract peer sta kickout event"); 7388 return; 7389 } 7390 7391 rcu_read_lock(); 7392 7393 spin_lock_bh(&ab->base_lock); 7394 7395 peer = ath12k_peer_find_by_addr(ab, arg.mac_addr); 7396 7397 if (!peer) { 7398 ath12k_warn(ab, "peer not found %pM\n", 7399 arg.mac_addr); 7400 goto exit; 7401 } 7402 7403 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id); 7404 if (!arvif) { 7405 ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d", 7406 peer->vdev_id); 7407 goto exit; 7408 } 7409 7410 ar = arvif->ar; 7411 7412 if (peer->mlo) { 7413 sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar), 7414 arg.mac_addr, 7415 NULL, &link_id); 7416 if (peer->link_id != link_id) { 7417 ath12k_warn(ab, 7418 "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n", 7419 arg.mac_addr, peer->link_id, link_id); 7420 goto exit; 7421 } 7422 } else { 7423 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 7424 arg.mac_addr, NULL); 7425 } 7426 if (!sta) { 7427 ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n", 7428 peer->mlo ? 
"MLO " : "", arg.mac_addr); 7429 goto exit; 7430 } 7431 7432 ath12k_dbg(ab, ATH12K_DBG_WMI, 7433 "peer sta kickout event %pM reason: %d rssi: %d\n", 7434 arg.mac_addr, arg.reason, arg.rssi); 7435 7436 switch (arg.reason) { 7437 case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY: 7438 if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) { 7439 ath12k_mac_handle_beacon_miss(ar, arvif); 7440 break; 7441 } 7442 fallthrough; 7443 default: 7444 ieee80211_report_low_ack(sta, 10); 7445 } 7446 7447 exit: 7448 spin_unlock_bh(&ab->base_lock); 7449 rcu_read_unlock(); 7450 } 7451 7452 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) 7453 { 7454 struct ath12k_link_vif *arvif; 7455 struct wmi_roam_event roam_ev = {}; 7456 struct ath12k *ar; 7457 u32 vdev_id; 7458 u8 roam_reason; 7459 7460 if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { 7461 ath12k_warn(ab, "failed to extract roam event"); 7462 return; 7463 } 7464 7465 vdev_id = le32_to_cpu(roam_ev.vdev_id); 7466 roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason), 7467 WMI_ROAM_REASON_MASK); 7468 7469 ath12k_dbg(ab, ATH12K_DBG_WMI, 7470 "wmi roam event vdev %u reason %d rssi %d\n", 7471 vdev_id, roam_reason, roam_ev.rssi); 7472 7473 guard(rcu)(); 7474 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7475 if (!arvif) { 7476 ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id); 7477 return; 7478 } 7479 7480 ar = arvif->ar; 7481 7482 if (roam_reason >= WMI_ROAM_REASON_MAX) 7483 ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", 7484 roam_reason, vdev_id); 7485 7486 switch (roam_reason) { 7487 case WMI_ROAM_REASON_BEACON_MISS: 7488 ath12k_mac_handle_beacon_miss(ar, arvif); 7489 break; 7490 case WMI_ROAM_REASON_BETTER_AP: 7491 case WMI_ROAM_REASON_LOW_RSSI: 7492 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 7493 case WMI_ROAM_REASON_HO_FAILED: 7494 ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", 7495 roam_reason, vdev_id); 7496 break; 7497 } 7498 } 7499 7500 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7501 { 7502 struct wmi_chan_info_event ch_info_ev = {}; 7503 struct ath12k *ar; 7504 struct survey_info *survey; 7505 int idx; 7506 /* HW channel counters frequency value in hertz */ 7507 u32 cc_freq_hz = ab->cc_freq_hz; 7508 7509 if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7510 ath12k_warn(ab, "failed to extract chan info event"); 7511 return; 7512 } 7513 7514 ath12k_dbg(ab, ATH12K_DBG_WMI, 7515 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 7516 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 7517 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 7518 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 7519 ch_info_ev.mac_clk_mhz); 7520 7521 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { 7522 ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); 7523 return; 7524 } 7525 7526 rcu_read_lock(); 7527 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); 7528 if (!ar) { 7529 ath12k_warn(ab, "invalid vdev id in chan info ev %d", 7530 ch_info_ev.vdev_id); 7531 rcu_read_unlock(); 7532 return; 7533 } 7534 spin_lock_bh(&ar->data_lock); 7535 7536 switch (ar->scan.state) { 7537 case ATH12K_SCAN_IDLE: 7538 case ATH12K_SCAN_STARTING: 7539 ath12k_warn(ab, "received chan info event without a scan request, ignoring\n"); 7540 goto exit; 7541 case ATH12K_SCAN_RUNNING: 7542 case ATH12K_SCAN_ABORTING: 7543 break; 7544 } 7545 
7546 idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq)); 7547 if (idx >= ARRAY_SIZE(ar->survey)) { 7548 ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", 7549 ch_info_ev.freq, idx); 7550 goto exit; 7551 } 7552 7553 /* If FW provides the MAC clock frequency in MHz, override the initialized 7554 * HW channel counters frequency value 7555 */ 7556 if (ch_info_ev.mac_clk_mhz) 7557 cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000); 7558 7559 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { 7560 survey = &ar->survey[idx]; 7561 memset(survey, 0, sizeof(*survey)); 7562 survey->noise = le32_to_cpu(ch_info_ev.noise_floor); 7563 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 7564 SURVEY_INFO_TIME_BUSY; 7565 survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz); 7566 survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count), 7567 cc_freq_hz); 7568 } 7569 exit: 7570 spin_unlock_bh(&ar->data_lock); 7571 rcu_read_unlock(); 7572 } 7573 7574 static void 7575 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7576 { 7577 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; 7578 struct survey_info *survey; 7579 struct ath12k *ar; 7580 u32 cc_freq_hz = ab->cc_freq_hz; 7581 u64 busy, total, tx, rx, rx_bss; 7582 int idx; 7583 7584 if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { 7585 ath12k_warn(ab, "failed to extract pdev bss chan info event"); 7586 return; 7587 } 7588 7589 busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 | 7590 le32_to_cpu(bss_ch_info_ev.rx_clear_count_low); 7591 7592 total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 | 7593 le32_to_cpu(bss_ch_info_ev.cycle_count_low); 7594 7595 tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 | 7596 le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low); 7597 7598 rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 | 7599 le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low); 7600 7601 rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 | 7602 le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low); 7603 7604 ath12k_dbg(ab, ATH12K_DBG_WMI, 7605 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 7606 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, 7607 bss_ch_info_ev.noise_floor, busy, total, 7608 tx, rx, rx_bss); 7609 7610 rcu_read_lock(); 7611 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id)); 7612 7613 if (!ar) { 7614 ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", 7615 bss_ch_info_ev.pdev_id); 7616 rcu_read_unlock(); 7617 return; 7618 } 7619 7620 spin_lock_bh(&ar->data_lock); 7621 idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq)); 7622 if (idx >= ARRAY_SIZE(ar->survey)) { 7623 ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 7624 bss_ch_info_ev.freq, idx); 7625 goto exit; 7626 } 7627 7628 survey = &ar->survey[idx]; 7629 7630 survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor); 7631 survey->time = div_u64(total, cc_freq_hz); 7632 survey->time_busy = div_u64(busy, cc_freq_hz); 7633 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 7634 survey->time_tx = div_u64(tx, cc_freq_hz); 7635 survey->filled |= (SURVEY_INFO_NOISE_DBM | 7636 SURVEY_INFO_TIME | 7637 SURVEY_INFO_TIME_BUSY | 7638 SURVEY_INFO_TIME_RX | 7639 SURVEY_INFO_TIME_TX); 7640 exit: 7641 spin_unlock_bh(&ar->data_lock); 7642 complete(&ar->bss_survey_done); 7643 7644
rcu_read_unlock(); 7645 } 7646 7647 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, 7648 struct sk_buff *skb) 7649 { 7650 struct wmi_vdev_install_key_complete_arg install_key_compl = {}; 7651 struct ath12k *ar; 7652 7653 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 7654 ath12k_warn(ab, "failed to extract install key compl event"); 7655 return; 7656 } 7657 7658 ath12k_dbg(ab, ATH12K_DBG_WMI, 7659 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 7660 install_key_compl.key_idx, install_key_compl.key_flags, 7661 install_key_compl.macaddr, install_key_compl.status); 7662 7663 rcu_read_lock(); 7664 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 7665 if (!ar) { 7666 ath12k_warn(ab, "invalid vdev id in install key compl ev %d", 7667 install_key_compl.vdev_id); 7668 rcu_read_unlock(); 7669 return; 7670 } 7671 7672 ar->install_key_status = 0; 7673 7674 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 7675 ath12k_warn(ab, "install key failed for %pM status %d\n", 7676 install_key_compl.macaddr, install_key_compl.status); 7677 ar->install_key_status = install_key_compl.status; 7678 } 7679 7680 complete(&ar->install_key_done); 7681 rcu_read_unlock(); 7682 } 7683 7684 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab, 7685 u16 tag, u16 len, 7686 const void *ptr, 7687 void *data) 7688 { 7689 const struct wmi_service_available_event *ev; 7690 u16 wmi_ext2_service_words; 7691 __le32 *wmi_ext2_service_bitmap; 7692 int i, j; 7693 u16 expected_len; 7694 7695 expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32); 7696 if (len < expected_len) { 7697 ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n", 7698 len, tag); 7699 return -EINVAL; 7700 } 7701 7702 switch (tag) { 7703 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 7704 ev = (struct wmi_service_available_event *)ptr; 7705 for (i = 0, j = WMI_MAX_SERVICE; 7706 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 7707 i++) { 7708 do { 7709 if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & 7710 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7711 set_bit(j, ab->wmi_ab.svc_map); 7712 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7713 } 7714 7715 ath12k_dbg(ab, ATH12K_DBG_WMI, 7716 "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x", 7717 ev->wmi_service_segment_bitmap[0], 7718 ev->wmi_service_segment_bitmap[1], 7719 ev->wmi_service_segment_bitmap[2], 7720 ev->wmi_service_segment_bitmap[3]); 7721 break; 7722 case WMI_TAG_ARRAY_UINT32: 7723 wmi_ext2_service_bitmap = (__le32 *)ptr; 7724 wmi_ext2_service_words = len / sizeof(u32); 7725 for (i = 0, j = WMI_MAX_EXT_SERVICE; 7726 i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE; 7727 i++) { 7728 do { 7729 if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) & 7730 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7731 set_bit(j, ab->wmi_ab.svc_map); 7732 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7733 ath12k_dbg(ab, ATH12K_DBG_WMI, 7734 "wmi_ext2_service bitmap 0x%08x\n", 7735 __le32_to_cpu(wmi_ext2_service_bitmap[i])); 7736 } 7737 7738 break; 7739 } 7740 return 0; 7741 } 7742 7743 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb) 7744 { 7745 int ret; 7746 7747 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7748 ath12k_wmi_tlv_services_parser, 7749 NULL); 7750 return ret; 7751 } 7752 7753 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) 7754 { 7755 struct wmi_peer_assoc_conf_arg 
peer_assoc_conf = {}; 7756 struct ath12k *ar; 7757 7758 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 7759 ath12k_warn(ab, "failed to extract peer assoc conf event"); 7760 return; 7761 } 7762 7763 ath12k_dbg(ab, ATH12K_DBG_WMI, 7764 "peer assoc conf ev vdev id %d macaddr %pM\n", 7765 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 7766 7767 rcu_read_lock(); 7768 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 7769 7770 if (!ar) { 7771 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 7772 peer_assoc_conf.vdev_id); 7773 rcu_read_unlock(); 7774 return; 7775 } 7776 7777 complete(&ar->peer_assoc_done); 7778 rcu_read_unlock(); 7779 } 7780 7781 static void 7782 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, 7783 struct ath12k_fw_stats *fw_stats, 7784 char *buf, u32 *length) 7785 { 7786 const struct ath12k_fw_stats_vdev *vdev; 7787 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7788 struct ath12k_link_vif *arvif; 7789 u32 len = *length; 7790 u8 *vif_macaddr; 7791 int i; 7792 7793 len += scnprintf(buf + len, buf_len - len, "\n"); 7794 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7795 "ath12k VDEV stats"); 7796 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7797 "================="); 7798 7799 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 7800 arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); 7801 if (!arvif) 7802 continue; 7803 vif_macaddr = arvif->ahvif->vif->addr; 7804 7805 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7806 "VDEV ID", vdev->vdev_id); 7807 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7808 "VDEV MAC address", vif_macaddr); 7809 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7810 "beacon snr", vdev->beacon_snr); 7811 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7812 "data snr", vdev->data_snr); 7813 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7814 "num rx frames", vdev->num_rx_frames); 7815 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7816 "num rts fail", vdev->num_rts_fail); 7817 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7818 "num rts success", vdev->num_rts_success); 7819 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7820 "num rx err", vdev->num_rx_err); 7821 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7822 "num rx discard", vdev->num_rx_discard); 7823 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7824 "num tx not acked", vdev->num_tx_not_acked); 7825 7826 for (i = 0 ; i < WLAN_MAX_AC; i++) 7827 len += scnprintf(buf + len, buf_len - len, 7828 "%25s [%02d] %u\n", 7829 "num tx frames", i, 7830 vdev->num_tx_frames[i]); 7831 7832 for (i = 0 ; i < WLAN_MAX_AC; i++) 7833 len += scnprintf(buf + len, buf_len - len, 7834 "%25s [%02d] %u\n", 7835 "num tx frames retries", i, 7836 vdev->num_tx_frames_retries[i]); 7837 7838 for (i = 0 ; i < WLAN_MAX_AC; i++) 7839 len += scnprintf(buf + len, buf_len - len, 7840 "%25s [%02d] %u\n", 7841 "num tx frames failures", i, 7842 vdev->num_tx_frames_failures[i]); 7843 7844 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7845 len += scnprintf(buf + len, buf_len - len, 7846 "%25s [%02d] 0x%08x\n", 7847 "tx rate history", i, 7848 vdev->tx_rate_history[i]); 7849 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7850 len += scnprintf(buf + len, buf_len - len, 7851 "%25s [%02d] %u\n", 7852 "beacon rssi history", i, 7853 vdev->beacon_rssi_history[i]); 7854 7855 len += scnprintf(buf + len, buf_len - len, "\n"); 7856 *length = len; 7857 } 7858 } 7859 7860 static void 7861 
ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar, 7862 struct ath12k_fw_stats *fw_stats, 7863 char *buf, u32 *length) 7864 { 7865 const struct ath12k_fw_stats_bcn *bcn; 7866 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7867 struct ath12k_link_vif *arvif; 7868 u32 len = *length; 7869 size_t num_bcn; 7870 7871 num_bcn = list_count_nodes(&fw_stats->bcn); 7872 7873 len += scnprintf(buf + len, buf_len - len, "\n"); 7874 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 7875 "ath12k Beacon stats", num_bcn); 7876 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7877 "==================="); 7878 7879 list_for_each_entry(bcn, &fw_stats->bcn, list) { 7880 arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id); 7881 if (!arvif) 7882 continue; 7883 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7884 "VDEV ID", bcn->vdev_id); 7885 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7886 "VDEV MAC address", arvif->ahvif->vif->addr); 7887 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7888 "================"); 7889 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7890 "Num of beacon tx success", bcn->tx_bcn_succ_cnt); 7891 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7892 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); 7893 7894 len += scnprintf(buf + len, buf_len - len, "\n"); 7895 *length = len; 7896 } 7897 } 7898 7899 static void 7900 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7901 char *buf, u32 *length, u64 fw_soc_drop_cnt) 7902 { 7903 u32 len = *length; 7904 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7905 7906 len = scnprintf(buf + len, buf_len - len, "\n"); 7907 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7908 "ath12k PDEV stats"); 7909 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7910 "================="); 7911 7912 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7913 "Channel noise floor", pdev->ch_noise_floor); 7914 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7915 "Channel TX power", pdev->chan_tx_power); 7916 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7917 "TX frame count", pdev->tx_frame_count); 7918 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7919 "RX frame count", pdev->rx_frame_count); 7920 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7921 "RX clear count", pdev->rx_clear_count); 7922 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7923 "Cycle count", pdev->cycle_count); 7924 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7925 "PHY error count", pdev->phy_err_count); 7926 len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n", 7927 "soc drop count", fw_soc_drop_cnt); 7928 7929 *length = len; 7930 } 7931 7932 static void 7933 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7934 char *buf, u32 *length) 7935 { 7936 u32 len = *length; 7937 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7938 7939 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 7940 "ath12k PDEV TX stats"); 7941 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7942 "===================="); 7943 7944 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7945 "HTT cookies queued", pdev->comp_queued); 7946 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7947 "HTT cookies disp.", pdev->comp_delivered); 7948 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7949 "MSDU queued", pdev->msdu_enqued); 7950 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7951 "MPDU queued", pdev->mpdu_enqued); 
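/* Note on the pattern used throughout these dump helpers: unlike snprintf(),
 * scnprintf() returns the number of characters actually written (excluding
 * the trailing NUL), so "len += scnprintf(buf + len, buf_len - len, ...)"
 * can never advance len past buf_len; once the buffer is full, subsequent
 * appends simply write nothing.
 */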
7952 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7953 "MSDUs dropped", pdev->wmm_drop); 7954 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7955 "Local enqued", pdev->local_enqued); 7956 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7957 "Local freed", pdev->local_freed); 7958 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7959 "HW queued", pdev->hw_queued); 7960 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7961 "PPDUs reaped", pdev->hw_reaped); 7962 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7963 "Num underruns", pdev->underrun); 7964 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7965 "PPDUs cleaned", pdev->tx_abort); 7966 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7967 "MPDUs requeued", pdev->mpdus_requed); 7968 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7969 "Excessive retries", pdev->tx_ko); 7970 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7971 "HW rate", pdev->data_rc); 7972 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7973 "Sched self triggers", pdev->self_triggers); 7974 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7975 "Dropped due to SW retries", 7976 pdev->sw_retry_failure); 7977 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7978 "Illegal rate phy errors", 7979 pdev->illgl_rate_phy_err); 7980 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7981 "PDEV continuous xretry", pdev->pdev_cont_xretry); 7982 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7983 "TX timeout", pdev->pdev_tx_timeout); 7984 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7985 "PDEV resets", pdev->pdev_resets); 7986 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7987 "Stateless TIDs alloc failures", 7988 pdev->stateless_tid_alloc_failure); 7989 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7990 "PHY underrun", pdev->phy_underrun); 7991 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7992 "MPDU is more than txop limit", pdev->txop_ovf); 7993 *length = len; 7994 } 7995 7996 static void 7997 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7998 char *buf, u32 *length) 7999 { 8000 u32 len = *length; 8001 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 8002 8003 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 8004 "ath12k PDEV RX stats"); 8005 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8006 "===================="); 8007 8008 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8009 "Mid PPDU route change", 8010 pdev->mid_ppdu_route_change); 8011 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8012 "Tot. 
number of statuses", pdev->status_rcvd); 8013 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8014 "Extra frags on rings 0", pdev->r0_frags); 8015 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8016 "Extra frags on rings 1", pdev->r1_frags); 8017 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8018 "Extra frags on rings 2", pdev->r2_frags); 8019 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8020 "Extra frags on rings 3", pdev->r3_frags); 8021 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8022 "MSDUs delivered to HTT", pdev->htt_msdus); 8023 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8024 "MPDUs delivered to HTT", pdev->htt_mpdus); 8025 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8026 "MSDUs delivered to stack", pdev->loc_msdus); 8027 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8028 "MPDUs delivered to stack", pdev->loc_mpdus); 8029 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8030 "Oversized AMSDUs", pdev->oversize_amsdu); 8031 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8032 "PHY errors", pdev->phy_errs); 8033 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8034 "PHY errors drops", pdev->phy_err_drop); 8035 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8036 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); 8037 *length = len; 8038 } 8039 8040 static void 8041 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar, 8042 struct ath12k_fw_stats *fw_stats, 8043 char *buf, u32 *length) 8044 { 8045 const struct ath12k_fw_stats_pdev *pdev; 8046 u32 len = *length; 8047 8048 pdev = list_first_entry_or_null(&fw_stats->pdevs, 8049 struct ath12k_fw_stats_pdev, list); 8050 if (!pdev) { 8051 ath12k_warn(ar->ab, "failed to get pdev stats\n"); 8052 return; 8053 } 8054 8055 ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len, 8056 ar->ab->fw_soc_drop_count); 8057 ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len); 8058 ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len); 8059 8060 *length = len; 8061 } 8062 8063 void ath12k_wmi_fw_stats_dump(struct ath12k *ar, 8064 struct ath12k_fw_stats *fw_stats, 8065 u32 stats_id, char *buf) 8066 { 8067 u32 len = 0; 8068 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 8069 8070 spin_lock_bh(&ar->data_lock); 8071 8072 switch (stats_id) { 8073 case WMI_REQUEST_VDEV_STAT: 8074 ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len); 8075 break; 8076 case WMI_REQUEST_BCN_STAT: 8077 ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len); 8078 break; 8079 case WMI_REQUEST_PDEV_STAT: 8080 ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len); 8081 break; 8082 default: 8083 break; 8084 } 8085 8086 spin_unlock_bh(&ar->data_lock); 8087 8088 if (len >= buf_len) 8089 buf[len - 1] = 0; 8090 else 8091 buf[len] = 0; 8092 } 8093 8094 static void 8095 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src, 8096 struct ath12k_fw_stats_vdev *dst) 8097 { 8098 int i; 8099 8100 dst->vdev_id = le32_to_cpu(src->vdev_id); 8101 dst->beacon_snr = le32_to_cpu(src->beacon_snr); 8102 dst->data_snr = le32_to_cpu(src->data_snr); 8103 dst->num_rx_frames = le32_to_cpu(src->num_rx_frames); 8104 dst->num_rts_fail = le32_to_cpu(src->num_rts_fail); 8105 dst->num_rts_success = le32_to_cpu(src->num_rts_success); 8106 dst->num_rx_err = le32_to_cpu(src->num_rx_err); 8107 dst->num_rx_discard = le32_to_cpu(src->num_rx_discard); 8108 dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked); 8109 8110 for (i = 0; i < WLAN_MAX_AC; i++) 8111 dst->num_tx_frames[i] = 8112
le32_to_cpu(src->num_tx_frames[i]); 8113 8114 for (i = 0; i < WLAN_MAX_AC; i++) 8115 dst->num_tx_frames_retries[i] = 8116 le32_to_cpu(src->num_tx_frames_retries[i]); 8117 8118 for (i = 0; i < WLAN_MAX_AC; i++) 8119 dst->num_tx_frames_failures[i] = 8120 le32_to_cpu(src->num_tx_frames_failures[i]); 8121 8122 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8123 dst->tx_rate_history[i] = 8124 le32_to_cpu(src->tx_rate_history[i]); 8125 8126 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8127 dst->beacon_rssi_history[i] = 8128 le32_to_cpu(src->beacon_rssi_history[i]); 8129 } 8130 8131 static void 8132 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src, 8133 struct ath12k_fw_stats_bcn *dst) 8134 { 8135 dst->vdev_id = le32_to_cpu(src->vdev_id); 8136 dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt); 8137 dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt); 8138 } 8139 8140 static void 8141 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src, 8142 struct ath12k_fw_stats_pdev *dst) 8143 { 8144 dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf); 8145 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 8146 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 8147 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); 8148 dst->cycle_count = __le32_to_cpu(src->cycle_count); 8149 dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 8150 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 8151 } 8152 8153 static void 8154 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src, 8155 struct ath12k_fw_stats_pdev *dst) 8156 { 8157 dst->comp_queued = a_sle32_to_cpu(src->comp_queued); 8158 dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered); 8159 dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued); 8160 dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued); 8161 dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop); 8162 dst->local_enqued = a_sle32_to_cpu(src->local_enqued); 8163 dst->local_freed = a_sle32_to_cpu(src->local_freed); 8164 dst->hw_queued = a_sle32_to_cpu(src->hw_queued); 8165 dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped); 8166 dst->underrun = a_sle32_to_cpu(src->underrun); 8167 dst->tx_abort = a_sle32_to_cpu(src->tx_abort); 8168 dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed); 8169 dst->tx_ko = __le32_to_cpu(src->tx_ko); 8170 dst->data_rc = __le32_to_cpu(src->data_rc); 8171 dst->self_triggers = __le32_to_cpu(src->self_triggers); 8172 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 8173 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 8174 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 8175 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 8176 dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 8177 dst->stateless_tid_alloc_failure = 8178 __le32_to_cpu(src->stateless_tid_alloc_failure); 8179 dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 8180 dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 8181 } 8182 8183 static void 8184 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src, 8185 struct ath12k_fw_stats_pdev *dst) 8186 { 8187 dst->mid_ppdu_route_change = 8188 a_sle32_to_cpu(src->mid_ppdu_route_change); 8189 dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd); 8190 dst->r0_frags = a_sle32_to_cpu(src->r0_frags); 8191 dst->r1_frags = a_sle32_to_cpu(src->r1_frags); 8192 dst->r2_frags = a_sle32_to_cpu(src->r2_frags); 8193 dst->r3_frags = a_sle32_to_cpu(src->r3_frags); 8194 dst->htt_msdus = 
a_sle32_to_cpu(src->htt_msdus); 8195 dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus); 8196 dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus); 8197 dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus); 8198 dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu); 8199 dst->phy_errs = a_sle32_to_cpu(src->phy_errs); 8200 dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop); 8201 dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs); 8202 } 8203 8204 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab, 8205 struct wmi_tlv_fw_stats_parse *parse, 8206 const void *ptr, 8207 u16 len) 8208 { 8209 const struct wmi_stats_event *ev = parse->ev; 8210 struct ath12k_fw_stats *stats = parse->stats; 8211 struct ath12k *ar; 8212 struct ath12k_link_vif *arvif; 8213 struct ieee80211_sta *sta; 8214 struct ath12k_sta *ahsta; 8215 struct ath12k_link_sta *arsta; 8216 int i, ret = 0; 8217 const void *data = ptr; 8218 8219 if (!ev) { 8220 ath12k_warn(ab, "failed to fetch update stats ev"); 8221 return -EPROTO; 8222 } 8223 8224 if (!stats) 8225 return -EINVAL; 8226 8227 rcu_read_lock(); 8228 8229 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8230 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8231 if (!ar) { 8232 ath12k_warn(ab, "invalid pdev id %d in update stats event\n", 8233 le32_to_cpu(ev->pdev_id)); 8234 ret = -EPROTO; 8235 goto exit; 8236 } 8237 8238 for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) { 8239 const struct wmi_vdev_stats_params *src; 8240 struct ath12k_fw_stats_vdev *dst; 8241 8242 src = data; 8243 if (len < sizeof(*src)) { 8244 ret = -EPROTO; 8245 goto exit; 8246 } 8247 8248 arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id)); 8249 if (arvif) { 8250 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 8251 arvif->bssid, 8252 NULL); 8253 if (sta) { 8254 ahsta = ath12k_sta_to_ahsta(sta); 8255 arsta = &ahsta->deflink; 8256 arsta->rssi_beacon = le32_to_cpu(src->beacon_snr); 8257 ath12k_dbg(ab, ATH12K_DBG_WMI, 8258 "wmi stats vdev id %d snr %d\n", 8259 src->vdev_id, src->beacon_snr); 8260 } else { 8261 ath12k_dbg(ab, ATH12K_DBG_WMI, 8262 "not found station bssid %pM for vdev stat\n", 8263 arvif->bssid); 8264 } 8265 } 8266 8267 data += sizeof(*src); 8268 len -= sizeof(*src); 8269 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8270 if (!dst) 8271 continue; 8272 ath12k_wmi_pull_vdev_stats(src, dst); 8273 stats->stats_id = WMI_REQUEST_VDEV_STAT; 8274 list_add_tail(&dst->list, &stats->vdevs); 8275 } 8276 for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) { 8277 const struct ath12k_wmi_bcn_stats_params *src; 8278 struct ath12k_fw_stats_bcn *dst; 8279 8280 src = data; 8281 if (len < sizeof(*src)) { 8282 ret = -EPROTO; 8283 goto exit; 8284 } 8285 8286 data += sizeof(*src); 8287 len -= sizeof(*src); 8288 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8289 if (!dst) 8290 continue; 8291 ath12k_wmi_pull_bcn_stats(src, dst); 8292 stats->stats_id = WMI_REQUEST_BCN_STAT; 8293 list_add_tail(&dst->list, &stats->bcn); 8294 } 8295 for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) { 8296 const struct ath12k_wmi_pdev_stats_params *src; 8297 struct ath12k_fw_stats_pdev *dst; 8298 8299 src = data; 8300 if (len < sizeof(*src)) { 8301 ret = -EPROTO; 8302 goto exit; 8303 } 8304 8305 stats->stats_id = WMI_REQUEST_PDEV_STAT; 8306 8307 data += sizeof(*src); 8308 len -= sizeof(*src); 8309 8310 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8311 if (!dst) 8312 continue; 8313 8314 ath12k_wmi_pull_pdev_stats_base(&src->base, dst); 8315 ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst); 8316 
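/* The rx sub-block is unpacked below; together with the base and tx
 * blocks above it fills one host-endian ath12k_fw_stats_pdev entry,
 * which is then queued on stats->pdevs for the dump helpers above.
 */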
ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst); 8317 list_add_tail(&dst->list, &stats->pdevs); 8318 } 8319 8320 exit: 8321 rcu_read_unlock(); 8322 return ret; 8323 } 8324 8325 static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab, 8326 u16 tag, u16 len, 8327 const void *ptr, void *data) 8328 { 8329 const struct wmi_rssi_stat_params *stats_rssi = ptr; 8330 struct wmi_tlv_fw_stats_parse *parse = data; 8331 const struct wmi_stats_event *ev = parse->ev; 8332 struct ath12k_fw_stats *stats = parse->stats; 8333 struct ath12k_link_vif *arvif; 8334 struct ath12k_link_sta *arsta; 8335 struct ieee80211_sta *sta; 8336 struct ath12k_sta *ahsta; 8337 struct ath12k *ar; 8338 int vdev_id; 8339 int j; 8340 8341 if (!ev) { 8342 ath12k_warn(ab, "failed to fetch update stats ev"); 8343 return -EPROTO; 8344 } 8345 8346 if (tag != WMI_TAG_RSSI_STATS) 8347 return -EPROTO; 8348 8349 if (!stats) 8350 return -EINVAL; 8351 8352 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8353 vdev_id = le32_to_cpu(stats_rssi->vdev_id); 8354 guard(rcu)(); 8355 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8356 if (!ar) { 8357 ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n", 8358 stats->pdev_id); 8359 return -EPROTO; 8360 } 8361 8362 arvif = ath12k_mac_get_arvif(ar, vdev_id); 8363 if (!arvif) { 8364 ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id); 8365 return -EPROTO; 8366 } 8367 8368 ath12k_dbg(ab, ATH12K_DBG_WMI, 8369 "stats bssid %pM vif %p\n", 8370 arvif->bssid, arvif->ahvif->vif); 8371 8372 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 8373 arvif->bssid, 8374 NULL); 8375 if (!sta) { 8376 ath12k_dbg(ab, ATH12K_DBG_WMI, 8377 "not found station of bssid %pM for rssi chain\n", 8378 arvif->bssid); 8379 return -EPROTO; 8380 } 8381 8382 ahsta = ath12k_sta_to_ahsta(sta); 8383 arsta = &ahsta->deflink; 8384 8385 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 8386 ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); 8387 8388 for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) 8389 arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]); 8390 8391 stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; 8392 8393 return 0; 8394 } 8395 8396 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab, 8397 u16 tag, u16 len, 8398 const void *ptr, void *data) 8399 { 8400 struct wmi_tlv_fw_stats_parse *parse = data; 8401 int ret = 0; 8402 8403 switch (tag) { 8404 case WMI_TAG_STATS_EVENT: 8405 parse->ev = ptr; 8406 break; 8407 case WMI_TAG_ARRAY_BYTE: 8408 ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); 8409 break; 8410 case WMI_TAG_PER_CHAIN_RSSI_STATS: 8411 parse->rssi = ptr; 8412 if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT) 8413 parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi); 8414 break; 8415 case WMI_TAG_ARRAY_STRUCT: 8416 if (parse->rssi_num && !parse->chain_rssi_done) { 8417 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 8418 ath12k_wmi_tlv_rssi_chain_parse, 8419 parse); 8420 if (ret) 8421 return ret; 8422 8423 parse->chain_rssi_done = true; 8424 } 8425 break; 8426 default: 8427 break; 8428 } 8429 return ret; 8430 } 8431 8432 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb, 8433 struct ath12k_fw_stats *stats) 8434 { 8435 struct wmi_tlv_fw_stats_parse parse = {}; 8436 8437 stats->stats_id = 0; 8438 parse.stats = stats; 8439 8440 return ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 8441 ath12k_wmi_tlv_fw_stats_parse, 8442 &parse); 8443 } 8444 8445 static void ath12k_wmi_fw_stats_process(struct ath12k *ar, 8446 
struct ath12k_fw_stats *stats) 8447 { 8448 struct ath12k_base *ab = ar->ab; 8449 struct ath12k_pdev *pdev; 8450 bool is_end = true; 8451 size_t total_vdevs_started = 0; 8452 int i; 8453 8454 if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { 8455 if (list_empty(&stats->vdevs)) { 8456 ath12k_warn(ab, "empty vdev stats"); 8457 return; 8458 } 8459 /* FW sends stats for all active VDEVs irrespective of PDEV, 8460 * hence treat the request as complete only once stats for all started VDEVs arrive 8461 */ 8462 rcu_read_lock(); 8463 for (i = 0; i < ab->num_radios; i++) { 8464 pdev = rcu_dereference(ab->pdevs_active[i]); 8465 if (pdev && pdev->ar) 8466 total_vdevs_started += pdev->ar->num_started_vdevs; 8467 } 8468 rcu_read_unlock(); 8469 8470 if (total_vdevs_started) 8471 is_end = ((++ar->fw_stats.num_vdev_recvd) == 8472 total_vdevs_started); 8473 8474 list_splice_tail_init(&stats->vdevs, 8475 &ar->fw_stats.vdevs); 8476 8477 if (is_end) 8478 complete(&ar->fw_stats_done); 8479 8480 return; 8481 } 8482 8483 if (stats->stats_id == WMI_REQUEST_BCN_STAT) { 8484 if (list_empty(&stats->bcn)) { 8485 ath12k_warn(ab, "empty beacon stats"); 8486 return; 8487 } 8488 8489 list_splice_tail_init(&stats->bcn, 8490 &ar->fw_stats.bcn); 8491 complete(&ar->fw_stats_done); 8492 } 8493 } 8494 8495 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) 8496 { 8497 struct ath12k_fw_stats stats = {}; 8498 struct ath12k *ar; 8499 int ret; 8500 8501 INIT_LIST_HEAD(&stats.pdevs); 8502 INIT_LIST_HEAD(&stats.vdevs); 8503 INIT_LIST_HEAD(&stats.bcn); 8504 8505 ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats); 8506 if (ret) { 8507 ath12k_warn(ab, "failed to pull fw stats: %d\n", ret); 8508 goto free; 8509 } 8510 8511 ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats"); 8512 8513 rcu_read_lock(); 8514 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); 8515 if (!ar) { 8516 rcu_read_unlock(); 8517 ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n", 8518 stats.pdev_id, ret); 8519 goto free; 8520 } 8521 8522 spin_lock_bh(&ar->data_lock); 8523 8524 /* Handle WMI_REQUEST_PDEV_STAT status update */ 8525 if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { 8526 list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); 8527 complete(&ar->fw_stats_done); 8528 goto complete; 8529 } 8530 8531 /* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */ 8532 if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { 8533 complete(&ar->fw_stats_done); 8534 goto complete; 8535 } 8536 8537 /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */ 8538 ath12k_wmi_fw_stats_process(ar, &stats); 8539 8540 complete: 8541 complete(&ar->fw_stats_complete); 8542 spin_unlock_bh(&ar->data_lock); 8543 rcu_read_unlock(); 8544 8545 /* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised 8546 * at this point, there is no need to free the individual lists. 8547 */ 8548 return; 8549 8550 free: 8551 ath12k_fw_stats_free(&stats); 8552 } 8553 8554 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned 8555 * is not part of BDF CTL (Conformance Test Limits) table entries. 
8556 */ 8557 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab, 8558 struct sk_buff *skb) 8559 { 8560 const void **tb; 8561 const struct wmi_pdev_ctl_failsafe_chk_event *ev; 8562 int ret; 8563 8564 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8565 if (IS_ERR(tb)) { 8566 ret = PTR_ERR(tb); 8567 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8568 return; 8569 } 8570 8571 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; 8572 if (!ev) { 8573 ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); 8574 kfree(tb); 8575 return; 8576 } 8577 8578 ath12k_dbg(ab, ATH12K_DBG_WMI, 8579 "pdev ctl failsafe check ev status %d\n", 8580 ev->ctl_failsafe_status); 8581 8582 /* If ctl_failsafe_status is set to 1, FW will max out the transmit power 8583 * to 10 dBm; otherwise the CTL power entry in the BDF is picked up. 8584 */ 8585 if (ev->ctl_failsafe_status != 0) 8586 ath12k_warn(ab, "pdev ctl failsafe failure status %d", 8587 ev->ctl_failsafe_status); 8588 8589 kfree(tb); 8590 } 8591 8592 static void 8593 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, 8594 const struct ath12k_wmi_pdev_csa_event *ev, 8595 const u32 *vdev_ids) 8596 { 8597 u32 current_switch_count = le32_to_cpu(ev->current_switch_count); 8598 u32 num_vdevs = le32_to_cpu(ev->num_vdevs); 8599 struct ieee80211_bss_conf *conf; 8600 struct ath12k_link_vif *arvif; 8601 struct ath12k_vif *ahvif; 8602 int i; 8603 8604 rcu_read_lock(); 8605 for (i = 0; i < num_vdevs; i++) { 8606 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); 8607 8608 if (!arvif) { 8609 ath12k_warn(ab, "Received csa status for unknown vdev %d", 8610 vdev_ids[i]); 8611 continue; 8612 } 8613 ahvif = arvif->ahvif; 8614 8615 if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { 8616 ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n", 8617 arvif->link_id); 8618 continue; 8619 } 8620 8621 conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]); 8622 if (!conf) { 8623 ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n", 8624 ahvif->vif->addr, arvif->link_id); 8625 continue; 8626 } 8627 8628 if (!arvif->is_up || !conf->csa_active) 8629 continue; 8630 8631 /* Finish CSA when counter reaches zero */ 8632 if (!current_switch_count) { 8633 ieee80211_csa_finish(ahvif->vif, arvif->link_id); 8634 arvif->current_cntdown_counter = 0; 8635 } else if (current_switch_count > 1) { 8636 /* If the count in event is not what we expect, don't update the 8637 * mac80211 count. 
Since during beacon Tx failure, count in the 8638 * firmware will not decrement and this event will come with the 8639 * previous count value again 8640 */ 8641 if (current_switch_count != arvif->current_cntdown_counter) 8642 continue; 8643 8644 arvif->current_cntdown_counter = 8645 ieee80211_beacon_update_cntdwn(ahvif->vif, 8646 arvif->link_id); 8647 } 8648 } 8649 rcu_read_unlock(); 8650 } 8651 8652 static void 8653 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab, 8654 struct sk_buff *skb) 8655 { 8656 const void **tb; 8657 const struct ath12k_wmi_pdev_csa_event *ev; 8658 const u32 *vdev_ids; 8659 int ret; 8660 8661 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8662 if (IS_ERR(tb)) { 8663 ret = PTR_ERR(tb); 8664 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8665 return; 8666 } 8667 8668 ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; 8669 vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; 8670 8671 if (!ev || !vdev_ids) { 8672 ath12k_warn(ab, "failed to fetch pdev csa switch count ev"); 8673 kfree(tb); 8674 return; 8675 } 8676 8677 ath12k_dbg(ab, ATH12K_DBG_WMI, 8678 "pdev csa switch count %d for pdev %d, num_vdevs %d", 8679 ev->current_switch_count, ev->pdev_id, 8680 ev->num_vdevs); 8681 8682 ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); 8683 8684 kfree(tb); 8685 } 8686 8687 static void 8688 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) 8689 { 8690 const void **tb; 8691 struct ath12k_mac_get_any_chanctx_conf_arg arg; 8692 const struct ath12k_wmi_pdev_radar_event *ev; 8693 struct ath12k *ar; 8694 int ret; 8695 8696 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8697 if (IS_ERR(tb)) { 8698 ret = PTR_ERR(tb); 8699 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8700 return; 8701 } 8702 8703 ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; 8704 8705 if (!ev) { 8706 ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev"); 8707 kfree(tb); 8708 return; 8709 } 8710 8711 ath12k_dbg(ab, ATH12K_DBG_WMI, 8712 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", 8713 ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, 8714 ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, 8715 ev->freq_offset, ev->sidx); 8716 8717 rcu_read_lock(); 8718 8719 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); 8720 8721 if (!ar) { 8722 ath12k_warn(ab, "radar detected in invalid pdev %d\n", 8723 ev->pdev_id); 8724 goto exit; 8725 } 8726 8727 arg.ar = ar; 8728 arg.chanctx_conf = NULL; 8729 ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), 8730 ath12k_mac_get_any_chanctx_conf_iter, &arg); 8731 if (!arg.chanctx_conf) { 8732 ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n"); 8733 goto exit; 8734 } 8735 8736 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", 8737 ev->pdev_id); 8738 8739 if (ar->dfs_block_radar_events) 8740 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); 8741 else 8742 ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf); 8743 8744 exit: 8745 rcu_read_unlock(); 8746 8747 kfree(tb); 8748 } 8749 8750 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id, 8751 struct sk_buff *skb) 8752 { 8753 const struct ath12k_wmi_ftm_event *ev; 8754 const void **tb; 8755 int ret; 8756 u16 length; 8757 8758 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8759 8760 if 
(IS_ERR(tb)) { 8761 ret = PTR_ERR(tb); 8762 ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret); 8763 return; 8764 } 8765 8766 ev = tb[WMI_TAG_ARRAY_BYTE]; 8767 if (!ev) { 8768 ath12k_warn(ab, "failed to fetch ftm msg\n"); 8769 kfree(tb); 8770 return; 8771 } 8772 8773 length = skb->len - TLV_HDR_SIZE; 8774 ath12k_tm_process_event(ab, cmd_id, ev, length); 8775 kfree(tb); 8776 tb = NULL; 8777 } 8778 8779 static void 8780 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, 8781 struct sk_buff *skb) 8782 { 8783 struct ath12k *ar; 8784 struct wmi_pdev_temperature_event ev = {}; 8785 8786 if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) { 8787 ath12k_warn(ab, "failed to extract pdev temperature event"); 8788 return; 8789 } 8790 8791 ath12k_dbg(ab, ATH12K_DBG_WMI, 8792 "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); 8793 8794 rcu_read_lock(); 8795 8796 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); 8797 if (!ar) { 8798 ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); 8799 goto exit; 8800 } 8801 8802 exit: 8803 rcu_read_unlock(); 8804 } 8805 8806 static void ath12k_fils_discovery_event(struct ath12k_base *ab, 8807 struct sk_buff *skb) 8808 { 8809 const void **tb; 8810 const struct wmi_fils_discovery_event *ev; 8811 int ret; 8812 8813 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8814 if (IS_ERR(tb)) { 8815 ret = PTR_ERR(tb); 8816 ath12k_warn(ab, 8817 "failed to parse FILS discovery event tlv %d\n", 8818 ret); 8819 return; 8820 } 8821 8822 ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; 8823 if (!ev) { 8824 ath12k_warn(ab, "failed to fetch FILS discovery event\n"); 8825 kfree(tb); 8826 return; 8827 } 8828 8829 ath12k_warn(ab, 8830 "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", 8831 ev->vdev_id, ev->fils_tt, ev->tbtt); 8832 8833 kfree(tb); 8834 } 8835 8836 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, 8837 struct sk_buff *skb) 8838 { 8839 const void **tb; 8840 const struct wmi_probe_resp_tx_status_event *ev; 8841 int ret; 8842 8843 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8844 if (IS_ERR(tb)) { 8845 ret = PTR_ERR(tb); 8846 ath12k_warn(ab, 8847 "failed to parse probe response transmission status event tlv: %d\n", 8848 ret); 8849 return; 8850 } 8851 8852 ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; 8853 if (!ev) { 8854 ath12k_warn(ab, 8855 "failed to fetch probe response transmission status event"); 8856 kfree(tb); 8857 return; 8858 } 8859 8860 if (ev->tx_status) 8861 ath12k_warn(ab, 8862 "Probe response transmission failed for vdev_id %u, status %u\n", 8863 ev->vdev_id, ev->tx_status); 8864 8865 kfree(tb); 8866 } 8867 8868 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab, 8869 struct sk_buff *skb) 8870 { 8871 const void **tb; 8872 const struct wmi_p2p_noa_event *ev; 8873 const struct ath12k_wmi_p2p_noa_info *noa; 8874 struct ath12k *ar; 8875 int ret, vdev_id; 8876 8877 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8878 if (IS_ERR(tb)) { 8879 ret = PTR_ERR(tb); 8880 ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret); 8881 return ret; 8882 } 8883 8884 ev = tb[WMI_TAG_P2P_NOA_EVENT]; 8885 noa = tb[WMI_TAG_P2P_NOA_INFO]; 8886 8887 if (!ev || !noa) { 8888 ret = -EPROTO; 8889 goto out; 8890 } 8891 8892 vdev_id = __le32_to_cpu(ev->vdev_id); 8893 8894 ath12k_dbg(ab, ATH12K_DBG_WMI, 8895 "wmi tlv p2p noa vdev_id %i descriptors %u\n", 8896 vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)); 8897 8898 
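/* The vdev id to ar mapping below is RCU-protected, so both the lookup
 * and the NoA update must happen inside the read-side critical section
 * that follows.
 */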
rcu_read_lock(); 8899 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 8900 if (!ar) { 8901 ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n", 8902 vdev_id); 8903 ret = -EINVAL; 8904 goto unlock; 8905 } 8906 8907 ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); 8908 8909 ret = 0; 8910 8911 unlock: 8912 rcu_read_unlock(); 8913 out: 8914 kfree(tb); 8915 return ret; 8916 } 8917 8918 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, 8919 struct sk_buff *skb) 8920 { 8921 const struct wmi_rfkill_state_change_event *ev; 8922 const void **tb; 8923 int ret; 8924 8925 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8926 if (IS_ERR(tb)) { 8927 ret = PTR_ERR(tb); 8928 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8929 return; 8930 } 8931 8932 ev = tb[WMI_TAG_RFKILL_EVENT]; 8933 if (!ev) { 8934 kfree(tb); 8935 return; 8936 } 8937 8938 ath12k_dbg(ab, ATH12K_DBG_MAC, 8939 "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", 8940 le32_to_cpu(ev->gpio_pin_num), 8941 le32_to_cpu(ev->int_type), 8942 le32_to_cpu(ev->radio_state)); 8943 8944 spin_lock_bh(&ab->base_lock); 8945 ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); 8946 spin_unlock_bh(&ab->base_lock); 8947 8948 queue_work(ab->workqueue, &ab->rfkill_work); 8949 kfree(tb); 8950 } 8951 8952 static void 8953 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb) 8954 { 8955 trace_ath12k_wmi_diag(ab, skb->data, skb->len); 8956 } 8957 8958 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab, 8959 struct sk_buff *skb) 8960 { 8961 const void **tb; 8962 const struct wmi_twt_enable_event *ev; 8963 int ret; 8964 8965 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8966 if (IS_ERR(tb)) { 8967 ret = PTR_ERR(tb); 8968 ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n", 8969 ret); 8970 return; 8971 } 8972 8973 ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT]; 8974 if (!ev) { 8975 ath12k_warn(ab, "failed to fetch twt enable wmi event\n"); 8976 goto exit; 8977 } 8978 8979 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n", 8980 le32_to_cpu(ev->pdev_id), 8981 le32_to_cpu(ev->status)); 8982 8983 exit: 8984 kfree(tb); 8985 } 8986 8987 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab, 8988 struct sk_buff *skb) 8989 { 8990 const void **tb; 8991 const struct wmi_twt_disable_event *ev; 8992 int ret; 8993 8994 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8995 if (IS_ERR(tb)) { 8996 ret = PTR_ERR(tb); 8997 ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n", 8998 ret); 8999 return; 9000 } 9001 9002 ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT]; 9003 if (!ev) { 9004 ath12k_warn(ab, "failed to fetch twt disable wmi event\n"); 9005 goto exit; 9006 } 9007 9008 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n", 9009 le32_to_cpu(ev->pdev_id), 9010 le32_to_cpu(ev->status)); 9011 9012 exit: 9013 kfree(tb); 9014 } 9015 9016 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, 9017 u16 tag, u16 len, 9018 const void *ptr, void *data) 9019 { 9020 const struct wmi_wow_ev_pg_fault_param *pf_param; 9021 const struct wmi_wow_ev_param *param; 9022 struct wmi_wow_ev_arg *arg = data; 9023 int pf_len; 9024 9025 switch (tag) { 9026 case WMI_TAG_WOW_EVENT_INFO: 9027 param = ptr; 9028 arg->wake_reason = le32_to_cpu(param->wake_reason); 9029 ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", 9030 arg->wake_reason, wow_reason(arg->wake_reason)); 9031 
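/* For WOW_REASON_PAGE_FAULT wake-ups the firmware appends a
 * WMI_TAG_ARRAY_BYTE TLV carrying the faulting packet; it is length
 * checked and dumped in the next case below.
 */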
		break;

	case WMI_TAG_ARRAY_BYTE:
		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
			pf_param = ptr;
			pf_len = le32_to_cpu(pf_param->len);
			if (pf_len > len - sizeof(pf_len) ||
			    pf_len < 0) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    pf_len);
				return -EINVAL;
			}
			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
				   pf_len);
			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
					"wow_reason_page_fault packet present",
					"wow_pg_fault ",
					pf_param->data,
					pf_len);
		}
		break;
	default:
		break;
	}

	return 0;
}

static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_wow_ev_arg arg = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_wow_wakeup_host_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
			    ret);
		return;
	}

	complete(&ab->wow.wakeup_completed);
}

static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_gtk_offload_status_event *ev;
	struct ath12k_link_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	rcu_read_lock();
	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
	if (!arvif) {
		rcu_read_unlock();
		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    le32_to_cpu(ev->vdev_id));
		kfree(tb);
		return;
	}

	replay_ctr = le64_to_cpu(ev->replay_ctr);
	arvif->rekey_data.replay_ctr = replay_ctr;
	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
		   le32_to_cpu(ev->refresh_cnt), replay_ctr);

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);

	rcu_read_unlock();

	kfree(tb);
}
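/* Worked example (illustrative, not driver code): for a firmware-reported
 * replay counter of 0x0102030405060708, the cpu_to_be64() above produces
 * the in-memory byte order 01 02 03 04 05 06 07 08, which is what the
 * supplicant expects in the GTK rekey notification:
 *
 *	u64 ctr = 0x0102030405060708ULL;
 *	__be64 ctr_be = cpu_to_be64(ctr);	// bytes: 01 02 ... 08
 */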
static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_mlo_setup_complete_event *ev;
	struct ath12k *ar = NULL;
	struct ath12k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
		kfree(tb);
		return;
	}

	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
		goto skip_lookup;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
			ar = pdev->ar;
			break;
		}
	}

skip_lookup:
	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
		goto out;
	}

	ar->mlo_setup_status = le32_to_cpu(ev->status);
	complete(&ar->mlo_setup_done);

out:
	kfree(tb);
}

static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
					       struct sk_buff *skb)
{
	const struct wmi_mlo_teardown_complete_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch teardown complete event\n");
		kfree(tb);
		return;
	}

	kfree(tb);
}
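/* Illustrative sketch (not part of this file): the sender of
 * WMI_MLO_SETUP_CMDID is expected to block on the completion fired by
 * ath12k_wmi_event_mlo_setup_complete() above, along these lines
 * (ATH12K_MLO_SETUP_TIMEOUT_HZ is a hypothetical timeout constant):
 *
 *	if (!wait_for_completion_timeout(&ar->mlo_setup_done,
 *					 ATH12K_MLO_SETUP_TIMEOUT_HZ))
 *		return -ETIMEDOUT;
 *	if (ar->mlo_setup_status)
 *		return -EINVAL;
 */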
#ifdef CONFIG_ATH12K_DEBUGFS
static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
					    const void *ptr, u16 tag, u16 len,
					    struct wmi_tpc_stats_arg *tpc_stats)
{
	u32 len1, len2, len3, len4;
	s16 *dst_ptr;
	s8 *dst_ptr_ctl;

	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);

	switch (tpc_stats->event_count) {
	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
		if (len1 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
			memcpy(dst_ptr, ptr, len1);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT1:
		if (len2 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
			dst_ptr = tpc_stats->rates_array1.rate_array;
			memcpy(dst_ptr, ptr, len2);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT2:
		if (len3 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
			dst_ptr = tpc_stats->rates_array2.rate_array;
			memcpy(dst_ptr, ptr, len3);
		}
		break;
	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
		if (len4 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
			memcpy(dst_ptr_ctl, ptr, len4);
		}
		break;
	}
	return 0;
}

static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
				  struct wmi_tpc_stats_arg *tpc_stats,
				  struct wmi_max_reg_power_fixed_params *ev)
{
	struct wmi_max_reg_power_allowed_arg *reg_pwr;
	u32 total_size;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received reg power array type %d length %d for tpc stats\n",
		   ev->reg_power_type, ev->reg_array_len);

	switch (le32_to_cpu(ev->reg_power_type)) {
	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
		reg_pwr = &tpc_stats->max_reg_allowed_power;
		break;
	default:
		return -EINVAL;
	}

	/* Each entry is 2 bytes, hence the product of the four dimensions
	 * is multiplied by 2
	 */
	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
	if (le32_to_cpu(ev->reg_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and reg_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&reg_pwr->tpc_reg_pwr, ev,
	       sizeof(struct wmi_max_reg_power_fixed_params));

	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
					 GFP_ATOMIC);
	if (!reg_pwr->reg_pwr_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;

	return 0;
}
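/* Worked example for the size check above (hypothetical dimensions): with
 * d1 = 4, d2 = 5, d3 = 2 and d4 = 1, the firmware must report
 * reg_array_len = 4 * 5 * 2 * 1 * 2 = 80 bytes, since every entry is a
 * 2-byte (s16) power value; any other length is rejected with -EINVAL.
 */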
static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
				     struct wmi_tpc_stats_arg *tpc_stats,
				     struct wmi_tpc_rates_array_fixed_params *ev)
{
	struct wmi_tpc_rates_array_arg *rates_array;
	u32 flag = 0, rate_array_len;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received rates array type %d length %d for tpc stats\n",
		   ev->rate_array_type, ev->rate_array_len);

	switch (le32_to_cpu(ev->rate_array_type)) {
	case ATH12K_TPC_STATS_RATES_ARRAY1:
		rates_array = &tpc_stats->rates_array1;
		flag = WMI_TPC_RATES_ARRAY1;
		break;
	case ATH12K_TPC_STATS_RATES_ARRAY2:
		rates_array = &tpc_stats->rates_array2;
		flag = WMI_TPC_RATES_ARRAY2;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of rates array for tpc stats\n");
		return -EINVAL;
	}
	memcpy(&rates_array->tpc_rates_array, ev,
	       sizeof(struct wmi_tpc_rates_array_fixed_params));
	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
	if (!rates_array->rate_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
				      struct wmi_tpc_stats_arg *tpc_stats,
				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
{
	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
	u32 total_size, ctl_array_len, flag = 0;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received ctl array type %d length %d for tpc stats\n",
		   ev->ctl_array_type, ev->ctl_array_len);

	switch (le32_to_cpu(ev->ctl_array_type)) {
	case ATH12K_TPC_STATS_CTL_ARRAY:
		ctl_array = &tpc_stats->ctl_array;
		flag = WMI_TPC_CTL_PWR_ARRAY;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of ctl pwr table for tpc stats\n");
		return -EINVAL;
	}

	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and ctl_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
	if (!ctl_array->ctl_pwr_table)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
	struct wmi_tpc_stats_arg *tpc_stats = data;
	struct wmi_tpc_config_params *tpc_config;
	int ret = 0;

	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
		tpc_config = (struct wmi_tpc_config_params *)ptr;
		memcpy(&tpc_stats->tpc_config, tpc_config,
		       sizeof(struct wmi_tpc_config_params));
		break;
	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
		break;
	case WMI_TAG_TPC_STATS_RATES_ARRAY:
		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
		break;
	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid tag for tpc stats in subtlvs\n");
		return -EINVAL;
	}
	return ret;
}
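/* The TPC stats event is a nested TLV stream: the outer parser below walks
 * the top-level TLVs and, for WMI_TAG_ARRAY_STRUCT, recurses into the
 * subtlv parser above. Schematically (contents illustrative):
 *
 *	[FIXED_PARAM]
 *	[ARRAY_STRUCT: [CONFIG][REG_PWR][RATES_ARRAY][CTL_PWR_TABLE]]
 *	[ARRAY_INT16 / ARRAY_BYTE: raw power arrays]
 */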
static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
	int ret;

	switch (tag) {
	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
		ret = 0;
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type to pack inside that tlv
		 */
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_tpc_stats_subtlv_parser,
					  tpc_stats);
		break;
	case WMI_TAG_ARRAY_INT16:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_INT16,
						       len, tpc_stats);
		break;
	case WMI_TAG_ARRAY_BYTE:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_BYTE,
						       len, tpc_stats);
		break;
	default:
		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}

void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
{
	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;

	lockdep_assert_held(&ar->data_lock);
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
	if (tpc_stats) {
		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
		kfree(tpc_stats->rates_array1.rate_array);
		kfree(tpc_stats->rates_array2.rate_array);
		kfree(tpc_stats->ctl_array.ctl_pwr_table);
		kfree(tpc_stats);
		ar->debug.tpc_stats = NULL;
	}
}

static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
	struct wmi_tpc_stats_arg *tpc_stats;
	const struct wmi_tlv *tlv;
	void *ptr = skb->data;
	struct ath12k *ar;
	u16 tlv_tag;
	u32 event_count;
	int ret;

	if (!skb->data) {
		ath12k_warn(ab, "No data present in tpc stats event\n");
		return;
	}

	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "TPC stats event size invalid\n");
		return;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
		return;
	}

	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
	if (!ar) {
		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
		rcu_read_unlock();
		return;
	}
	spin_lock_bh(&ar->data_lock);
	if (!ar->debug.tpc_request) {
		/* Event was received either without a request or after the
		 * timeout; if memory is already allocated, free it.
		 */
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
			ath12k_wmi_free_tpc_stats_mem(ar);
		}
		goto unlock;
	}

	event_count = le32_to_cpu(fixed_param->event_count);
	if (event_count == 0) {
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Invalid tpc memory present\n");
			goto unlock;
		}
		ar->debug.tpc_stats =
			kzalloc(sizeof(struct wmi_tpc_stats_arg),
				GFP_ATOMIC);
		if (!ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Failed to allocate memory for tpc stats\n");
			goto unlock;
		}
	}

	tpc_stats = ar->debug.tpc_stats;
	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		goto unlock;
	}

	if (event_count != 0) {
		if (event_count != tpc_stats->event_count + 1) {
			ath12k_warn(ab,
				    "Invalid tpc event received\n");
			goto unlock;
		}
	}
	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "tpc stats event_count %d\n",
		   tpc_stats->event_count);
	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tpc_stats_event_parser,
				  tpc_stats);
	if (ret) {
		ath12k_wmi_free_tpc_stats_mem(ar);
		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
		goto unlock;
	}

	if (tpc_stats->end_of_event)
		complete(&ar->debug.tpc_complete);

unlock:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
#else
static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
}
#endif
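/* Illustrative request/response flow (sketch only, timeout hypothetical):
 * the debugfs read side is expected to set ar->debug.tpc_request, issue the
 * request and wait for the completion signalled above, roughly:
 *
 *	ar->debug.tpc_request = true;
 *	ath12k_wmi_send_tpc_stats_request(ar, type);
 *	wait_for_completion_timeout(&ar->debug.tpc_complete, timeout);
 *
 * The multi-part events then arrive with event_count 0, 1, 2, ... and the
 * sequencing check above discards out-of-order chunks.
 */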
static int
ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
	u8 num_20mhz_segments;
	s8 min_nf, *nf_ptr;
	int i, j;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
		if (len < sizeof(*param_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		param_info = ptr;

		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);

		/* The received array is a flat array of 32-bit words.
		 * Unpack it into a 2D byte-array indexed by
		 * [chain][20 MHz subband].
		 */
		nf_ptr = &param_arg.nf_hw_dbm[0][0];

		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);

			for (j = 0; j < 4; j++) {
				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
				nf_ptr++;
			}
		}

		switch (param_arg.curr_bw) {
		case WMI_CHAN_WIDTH_20:
			num_20mhz_segments = 1;
			break;
		case WMI_CHAN_WIDTH_40:
			num_20mhz_segments = 2;
			break;
		case WMI_CHAN_WIDTH_80:
			num_20mhz_segments = 4;
			break;
		case WMI_CHAN_WIDTH_160:
			num_20mhz_segments = 8;
			break;
		case WMI_CHAN_WIDTH_320:
			num_20mhz_segments = 16;
			break;
		default:
			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
				    param_arg.curr_bw);
			/* In the error case, still consider the primary 20 MHz
			 * segment: that is much better than dropping the
			 * whole event.
			 */
			num_20mhz_segments = 1;
		}

		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;

		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
			if (!(param_arg.curr_rx_chainmask & BIT(i)))
				continue;

			for (j = 0; j < num_20mhz_segments; j++) {
				if (param_arg.nf_hw_dbm[i][j] < min_nf)
					min_nf = param_arg.nf_hw_dbm[i][j];
			}
		}

		rssi_info->min_nf_dbm = min_nf;
		rssi_info->nf_dbm_present = true;
		break;
	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
		if (len < sizeof(*temp_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		temp_info = ptr;
		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
		rssi_info->temp_offset_present = true;
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
	}

	return 0;
}

static int
ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	int ret = 0;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type inside that tlv
		 */
		if (len == 0)
			return 0;

		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
					  data);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
			   tag);
		break;
	}

	return ret;
}

static int
ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
						  size_t len, u32 *pdev_id)
{
	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
	const struct wmi_tlv *tlv;
	u16 tlv_tag;

	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
		return -EINVAL;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
		return -EINVAL;
	}

	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
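	/* Only the pdev_id is consumed from the fixed param here; the rest
	 * of the event is walked TLV-by-TLV by the caller.
	 */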
*pdev_id = le32_to_cpu(fixed_param->pdev_id); 9741 9742 return 0; 9743 } 9744 9745 static void 9746 ath12k_wmi_update_rssi_offsets(struct ath12k *ar, 9747 struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info) 9748 { 9749 struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info; 9750 9751 lockdep_assert_held(&ar->data_lock); 9752 9753 if (rssi_info->temp_offset_present) 9754 info->temp_offset = rssi_info->temp_offset; 9755 9756 if (rssi_info->nf_dbm_present) 9757 info->min_nf_dbm = rssi_info->min_nf_dbm; 9758 9759 info->noise_floor = info->min_nf_dbm + info->temp_offset; 9760 } 9761 9762 static void 9763 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab, 9764 struct sk_buff *skb) 9765 { 9766 struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info; 9767 struct ath12k *ar; 9768 s32 noise_floor; 9769 u32 pdev_id; 9770 int ret; 9771 9772 ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len, 9773 &pdev_id); 9774 if (ret) { 9775 ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n", 9776 ret); 9777 return; 9778 } 9779 9780 rcu_read_lock(); 9781 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 9782 /* If pdev is not active, ignore the event */ 9783 if (!ar) 9784 goto out_unlock; 9785 9786 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 9787 ath12k_wmi_rssi_dbm_conv_info_event_parser, 9788 &rssi_info); 9789 if (ret) { 9790 ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n"); 9791 goto out_unlock; 9792 } 9793 9794 spin_lock_bh(&ar->data_lock); 9795 ath12k_wmi_update_rssi_offsets(ar, &rssi_info); 9796 noise_floor = ath12k_pdev_get_noise_floor(ar); 9797 spin_unlock_bh(&ar->data_lock); 9798 9799 ath12k_dbg(ab, ATH12K_DBG_WMI, 9800 "RSSI noise floor updated, new value is %d dbm\n", noise_floor); 9801 out_unlock: 9802 rcu_read_unlock(); 9803 } 9804 9805 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 9806 { 9807 struct wmi_cmd_hdr *cmd_hdr; 9808 enum wmi_tlv_event_id id; 9809 9810 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 9811 id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID); 9812 9813 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) 9814 goto out; 9815 9816 switch (id) { 9817 /* Process all the WMI events here */ 9818 case WMI_SERVICE_READY_EVENTID: 9819 ath12k_service_ready_event(ab, skb); 9820 break; 9821 case WMI_SERVICE_READY_EXT_EVENTID: 9822 ath12k_service_ready_ext_event(ab, skb); 9823 break; 9824 case WMI_SERVICE_READY_EXT2_EVENTID: 9825 ath12k_service_ready_ext2_event(ab, skb); 9826 break; 9827 case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: 9828 ath12k_reg_chan_list_event(ab, skb); 9829 break; 9830 case WMI_READY_EVENTID: 9831 ath12k_ready_event(ab, skb); 9832 break; 9833 case WMI_PEER_DELETE_RESP_EVENTID: 9834 ath12k_peer_delete_resp_event(ab, skb); 9835 break; 9836 case WMI_VDEV_START_RESP_EVENTID: 9837 ath12k_vdev_start_resp_event(ab, skb); 9838 break; 9839 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: 9840 ath12k_bcn_tx_status_event(ab, skb); 9841 break; 9842 case WMI_VDEV_STOPPED_EVENTID: 9843 ath12k_vdev_stopped_event(ab, skb); 9844 break; 9845 case WMI_MGMT_RX_EVENTID: 9846 ath12k_mgmt_rx_event(ab, skb); 9847 /* mgmt_rx_event() owns the skb now! 
*/ 9848 return; 9849 case WMI_MGMT_TX_COMPLETION_EVENTID: 9850 ath12k_mgmt_tx_compl_event(ab, skb); 9851 break; 9852 case WMI_SCAN_EVENTID: 9853 ath12k_scan_event(ab, skb); 9854 break; 9855 case WMI_PEER_STA_KICKOUT_EVENTID: 9856 ath12k_peer_sta_kickout_event(ab, skb); 9857 break; 9858 case WMI_ROAM_EVENTID: 9859 ath12k_roam_event(ab, skb); 9860 break; 9861 case WMI_CHAN_INFO_EVENTID: 9862 ath12k_chan_info_event(ab, skb); 9863 break; 9864 case WMI_PDEV_BSS_CHAN_INFO_EVENTID: 9865 ath12k_pdev_bss_chan_info_event(ab, skb); 9866 break; 9867 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 9868 ath12k_vdev_install_key_compl_event(ab, skb); 9869 break; 9870 case WMI_SERVICE_AVAILABLE_EVENTID: 9871 ath12k_service_available_event(ab, skb); 9872 break; 9873 case WMI_PEER_ASSOC_CONF_EVENTID: 9874 ath12k_peer_assoc_conf_event(ab, skb); 9875 break; 9876 case WMI_UPDATE_STATS_EVENTID: 9877 ath12k_update_stats_event(ab, skb); 9878 break; 9879 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: 9880 ath12k_pdev_ctl_failsafe_check_event(ab, skb); 9881 break; 9882 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: 9883 ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb); 9884 break; 9885 case WMI_PDEV_TEMPERATURE_EVENTID: 9886 ath12k_wmi_pdev_temperature_event(ab, skb); 9887 break; 9888 case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: 9889 ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb); 9890 break; 9891 case WMI_HOST_FILS_DISCOVERY_EVENTID: 9892 ath12k_fils_discovery_event(ab, skb); 9893 break; 9894 case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: 9895 ath12k_probe_resp_tx_status_event(ab, skb); 9896 break; 9897 case WMI_RFKILL_STATE_CHANGE_EVENTID: 9898 ath12k_rfkill_state_change_event(ab, skb); 9899 break; 9900 case WMI_TWT_ENABLE_EVENTID: 9901 ath12k_wmi_twt_enable_event(ab, skb); 9902 break; 9903 case WMI_TWT_DISABLE_EVENTID: 9904 ath12k_wmi_twt_disable_event(ab, skb); 9905 break; 9906 case WMI_P2P_NOA_EVENTID: 9907 ath12k_wmi_p2p_noa_event(ab, skb); 9908 break; 9909 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: 9910 ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); 9911 break; 9912 case WMI_VDEV_DELETE_RESP_EVENTID: 9913 ath12k_vdev_delete_resp_event(ab, skb); 9914 break; 9915 case WMI_DIAG_EVENTID: 9916 ath12k_wmi_diag_event(ab, skb); 9917 break; 9918 case WMI_WOW_WAKEUP_HOST_EVENTID: 9919 ath12k_wmi_event_wow_wakeup_host(ab, skb); 9920 break; 9921 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 9922 ath12k_wmi_gtk_offload_status_event(ab, skb); 9923 break; 9924 case WMI_MLO_SETUP_COMPLETE_EVENTID: 9925 ath12k_wmi_event_mlo_setup_complete(ab, skb); 9926 break; 9927 case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: 9928 ath12k_wmi_event_teardown_complete(ab, skb); 9929 break; 9930 case WMI_HALPHY_STATS_CTRL_PATH_EVENTID: 9931 ath12k_wmi_process_tpc_stats(ab, skb); 9932 break; 9933 case WMI_11D_NEW_COUNTRY_EVENTID: 9934 ath12k_reg_11d_new_cc_event(ab, skb); 9935 break; 9936 case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID: 9937 ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb); 9938 break; 9939 case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: 9940 ath12k_wmi_obss_color_collision_event(ab, skb); 9941 break; 9942 /* add Unsupported events (rare) here */ 9943 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 9944 case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 9945 case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 9946 ath12k_dbg(ab, ATH12K_DBG_WMI, 9947 "ignoring unsupported event 0x%x\n", id); 9948 break; 9949 /* add Unsupported events (frequent) here */ 9950 case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: 9951 case WMI_MGMT_RX_FW_CONSUMED_EVENTID: 9952 /* 
debug might flood hence silently ignore (no-op) */ 9953 break; 9954 case WMI_PDEV_UTF_EVENTID: 9955 if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags)) 9956 ath12k_tm_wmi_event_segmented(ab, id, skb); 9957 else 9958 ath12k_tm_wmi_event_unsegmented(ab, id, skb); 9959 break; 9960 default: 9961 ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); 9962 break; 9963 } 9964 9965 out: 9966 dev_kfree_skb(skb); 9967 } 9968 9969 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, 9970 u32 pdev_idx) 9971 { 9972 int status; 9973 static const u32 svc_id[] = { 9974 ATH12K_HTC_SVC_ID_WMI_CONTROL, 9975 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, 9976 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 9977 }; 9978 struct ath12k_htc_svc_conn_req conn_req = {}; 9979 struct ath12k_htc_svc_conn_resp conn_resp = {}; 9980 9981 /* these fields are the same for all service endpoints */ 9982 conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete; 9983 conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx; 9984 conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits; 9985 9986 /* connect to control service */ 9987 conn_req.service_id = svc_id[pdev_idx]; 9988 9989 status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); 9990 if (status) { 9991 ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", 9992 status); 9993 return status; 9994 } 9995 9996 ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; 9997 ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; 9998 ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; 9999 10000 return 0; 10001 } 10002 10003 static int 10004 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, 10005 struct wmi_unit_test_cmd ut_cmd, 10006 u32 *test_args) 10007 { 10008 struct ath12k_wmi_pdev *wmi = ar->wmi; 10009 struct wmi_unit_test_cmd *cmd; 10010 struct sk_buff *skb; 10011 struct wmi_tlv *tlv; 10012 void *ptr; 10013 u32 *ut_cmd_args; 10014 int buf_len, arg_len; 10015 int ret; 10016 int i; 10017 10018 arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args); 10019 buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; 10020 10021 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10022 if (!skb) 10023 return -ENOMEM; 10024 10025 cmd = (struct wmi_unit_test_cmd *)skb->data; 10026 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, 10027 sizeof(ut_cmd)); 10028 10029 cmd->vdev_id = ut_cmd.vdev_id; 10030 cmd->module_id = ut_cmd.module_id; 10031 cmd->num_args = ut_cmd.num_args; 10032 cmd->diag_token = ut_cmd.diag_token; 10033 10034 ptr = skb->data + sizeof(ut_cmd); 10035 10036 tlv = ptr; 10037 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 10038 10039 ptr += TLV_HDR_SIZE; 10040 10041 ut_cmd_args = ptr; 10042 for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++) 10043 ut_cmd_args[i] = test_args[i]; 10044 10045 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10046 "WMI unit test : module %d vdev %d n_args %d token %d\n", 10047 cmd->module_id, cmd->vdev_id, cmd->num_args, 10048 cmd->diag_token); 10049 10050 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 10051 10052 if (ret) { 10053 ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", 10054 ret); 10055 dev_kfree_skb(skb); 10056 } 10057 10058 return ret; 10059 } 10060 10061 int ath12k_wmi_simulate_radar(struct ath12k *ar) 10062 { 10063 struct ath12k_link_vif *arvif; 10064 u32 dfs_args[DFS_MAX_TEST_ARGS]; 10065 struct wmi_unit_test_cmd wmi_ut; 10066 bool arvif_found = false; 10067 10068 list_for_each_entry(arvif, &ar->arvifs, list) { 10069 if (arvif->is_started && arvif->ahvif->vdev_type 
== WMI_VDEV_TYPE_AP) { 10070 arvif_found = true; 10071 break; 10072 } 10073 } 10074 10075 if (!arvif_found) 10076 return -EINVAL; 10077 10078 dfs_args[DFS_TEST_CMDID] = 0; 10079 dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 10080 /* Currently we could pass segment_id(b0 - b1), chirp(b2) 10081 * freq offset (b3 - b10) to unit test. For simulation 10082 * purpose this can be set to 0 which is valid. 10083 */ 10084 dfs_args[DFS_TEST_RADAR_PARAM] = 0; 10085 10086 wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id); 10087 wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE); 10088 wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS); 10089 wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN); 10090 10091 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 10092 10093 return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 10094 } 10095 10096 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, 10097 enum wmi_halphy_ctrl_path_stats_id tpc_stats_type) 10098 { 10099 struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd; 10100 struct ath12k_wmi_pdev *wmi = ar->wmi; 10101 struct sk_buff *skb; 10102 struct wmi_tlv *tlv; 10103 __le32 *pdev_id; 10104 u32 buf_len; 10105 void *ptr; 10106 int ret; 10107 10108 buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE; 10109 10110 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10111 if (!skb) 10112 return -ENOMEM; 10113 cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data; 10114 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM, 10115 sizeof(*cmd)); 10116 10117 cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT); 10118 cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET); 10119 cmd->subid = cpu_to_le32(tpc_stats_type); 10120 10121 ptr = skb->data + sizeof(*cmd); 10122 10123 /* The below TLV arrays optionally follow this fixed param TLV structure 10124 * 1. ARRAY_UINT32 pdev_ids[] 10125 * If this array is present and non-zero length, stats should only 10126 * be provided from the pdevs identified in the array. 10127 * 2. ARRAY_UNIT32 vdev_ids[] 10128 * If this array is present and non-zero length, stats should only 10129 * be provided from the vdevs identified in the array. 10130 * 3. 
ath12k_wmi_mac_addr_params peer_macaddr[]; 10131 * If this array is present and non-zero length, stats should only 10132 * be provided from the peers with the MAC addresses specified 10133 * in the array 10134 */ 10135 tlv = ptr; 10136 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); 10137 ptr += TLV_HDR_SIZE; 10138 10139 pdev_id = ptr; 10140 *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar)); 10141 ptr += sizeof(*pdev_id); 10142 10143 tlv = ptr; 10144 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 10145 ptr += TLV_HDR_SIZE; 10146 10147 tlv = ptr; 10148 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0); 10149 ptr += TLV_HDR_SIZE; 10150 10151 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID); 10152 if (ret) { 10153 ath12k_warn(ar->ab, 10154 "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n"); 10155 dev_kfree_skb(skb); 10156 return ret; 10157 } 10158 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n", 10159 ar->pdev->pdev_id); 10160 10161 return ret; 10162 } 10163 10164 int ath12k_wmi_connect(struct ath12k_base *ab) 10165 { 10166 u32 i; 10167 u8 wmi_ep_count; 10168 10169 wmi_ep_count = ab->htc.wmi_ep_count; 10170 if (wmi_ep_count > ab->hw_params->max_radios) 10171 return -1; 10172 10173 for (i = 0; i < wmi_ep_count; i++) 10174 ath12k_connect_pdev_htc_service(ab, i); 10175 10176 return 0; 10177 } 10178 10179 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id) 10180 { 10181 if (WARN_ON(pdev_id >= MAX_RADIOS)) 10182 return; 10183 10184 /* TODO: Deinit any pdev specific wmi resource */ 10185 } 10186 10187 int ath12k_wmi_pdev_attach(struct ath12k_base *ab, 10188 u8 pdev_id) 10189 { 10190 struct ath12k_wmi_pdev *wmi_handle; 10191 10192 if (pdev_id >= ab->hw_params->max_radios) 10193 return -EINVAL; 10194 10195 wmi_handle = &ab->wmi_ab.wmi[pdev_id]; 10196 10197 wmi_handle->wmi_ab = &ab->wmi_ab; 10198 10199 ab->wmi_ab.ab = ab; 10200 /* TODO: Init remaining resource specific to pdev */ 10201 10202 return 0; 10203 } 10204 10205 int ath12k_wmi_attach(struct ath12k_base *ab) 10206 { 10207 int ret; 10208 10209 ret = ath12k_wmi_pdev_attach(ab, 0); 10210 if (ret) 10211 return ret; 10212 10213 ab->wmi_ab.ab = ab; 10214 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; 10215 10216 /* It's overwritten when service_ext_ready is handled */ 10217 if (ab->hw_params->single_pdev_only) 10218 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; 10219 10220 /* TODO: Init remaining wmi soc resources required */ 10221 init_completion(&ab->wmi_ab.service_ready); 10222 init_completion(&ab->wmi_ab.unified_ready); 10223 10224 return 0; 10225 } 10226 10227 void ath12k_wmi_detach(struct ath12k_base *ab) 10228 { 10229 int i; 10230 10231 /* TODO: Deinit wmi resource specific to SOC as required */ 10232 10233 for (i = 0; i < ab->htc.wmi_ep_count; i++) 10234 ath12k_wmi_pdev_detach(ab, i); 10235 10236 ath12k_wmi_free_dbring_caps(ab); 10237 } 10238 10239 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg) 10240 { 10241 struct wmi_hw_data_filter_cmd *cmd; 10242 struct sk_buff *skb; 10243 int len; 10244 10245 len = sizeof(*cmd); 10246 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10247 10248 if (!skb) 10249 return -ENOMEM; 10250 10251 cmd = (struct wmi_hw_data_filter_cmd *)skb->data; 10252 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD, 10253 sizeof(*cmd)); 10254 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 10255 cmd->enable = 
cpu_to_le32(arg->enable ? 1 : 0); 10256 10257 /* Set all modes in case of disable */ 10258 if (arg->enable) 10259 cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap); 10260 else 10261 cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U); 10262 10263 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10264 "wmi hw data filter enable %d filter_bitmap 0x%x\n", 10265 arg->enable, arg->hw_filter_bitmap); 10266 10267 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); 10268 } 10269 10270 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar) 10271 { 10272 struct wmi_wow_host_wakeup_cmd *cmd; 10273 struct sk_buff *skb; 10274 size_t len; 10275 10276 len = sizeof(*cmd); 10277 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10278 if (!skb) 10279 return -ENOMEM; 10280 10281 cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data; 10282 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, 10283 sizeof(*cmd)); 10284 10285 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); 10286 10287 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); 10288 } 10289 10290 int ath12k_wmi_wow_enable(struct ath12k *ar) 10291 { 10292 struct wmi_wow_enable_cmd *cmd; 10293 struct sk_buff *skb; 10294 int len; 10295 10296 len = sizeof(*cmd); 10297 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10298 if (!skb) 10299 return -ENOMEM; 10300 10301 cmd = (struct wmi_wow_enable_cmd *)skb->data; 10302 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD, 10303 sizeof(*cmd)); 10304 10305 cmd->enable = cpu_to_le32(1); 10306 cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED); 10307 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n"); 10308 10309 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); 10310 } 10311 10312 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, 10313 enum wmi_wow_wakeup_event event, 10314 u32 enable) 10315 { 10316 struct wmi_wow_add_del_event_cmd *cmd; 10317 struct sk_buff *skb; 10318 size_t len; 10319 10320 len = sizeof(*cmd); 10321 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10322 if (!skb) 10323 return -ENOMEM; 10324 10325 cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; 10326 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD, 10327 sizeof(*cmd)); 10328 cmd->vdev_id = cpu_to_le32(vdev_id); 10329 cmd->is_add = cpu_to_le32(enable); 10330 cmd->event_bitmap = cpu_to_le32((1 << event)); 10331 10332 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", 10333 wow_wakeup_event(event), enable, vdev_id); 10334 10335 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); 10336 } 10337 10338 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, 10339 const u8 *pattern, const u8 *mask, 10340 int pattern_len, int pattern_offset) 10341 { 10342 struct wmi_wow_add_pattern_cmd *cmd; 10343 struct wmi_wow_bitmap_pattern_params *bitmap; 10344 struct wmi_tlv *tlv; 10345 struct sk_buff *skb; 10346 void *ptr; 10347 size_t len; 10348 10349 len = sizeof(*cmd) + 10350 sizeof(*tlv) + /* array struct */ 10351 sizeof(*bitmap) + /* bitmap */ 10352 sizeof(*tlv) + /* empty ipv4 sync */ 10353 sizeof(*tlv) + /* empty ipv6 sync */ 10354 sizeof(*tlv) + /* empty magic */ 10355 sizeof(*tlv) + /* empty info timeout */ 10356 sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ 10357 10358 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10359 if (!skb) 10360 return -ENOMEM; 10361 10362 /* cmd */ 10363 ptr = 
skb->data; 10364 cmd = ptr; 10365 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD, 10366 sizeof(*cmd)); 10367 cmd->vdev_id = cpu_to_le32(vdev_id); 10368 cmd->pattern_id = cpu_to_le32(pattern_id); 10369 cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); 10370 10371 ptr += sizeof(*cmd); 10372 10373 /* bitmap */ 10374 tlv = ptr; 10375 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap)); 10376 10377 ptr += sizeof(*tlv); 10378 10379 bitmap = ptr; 10380 bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T, 10381 sizeof(*bitmap)); 10382 memcpy(bitmap->patternbuf, pattern, pattern_len); 10383 memcpy(bitmap->bitmaskbuf, mask, pattern_len); 10384 bitmap->pattern_offset = cpu_to_le32(pattern_offset); 10385 bitmap->pattern_len = cpu_to_le32(pattern_len); 10386 bitmap->bitmask_len = cpu_to_le32(pattern_len); 10387 bitmap->pattern_id = cpu_to_le32(pattern_id); 10388 10389 ptr += sizeof(*bitmap); 10390 10391 /* ipv4 sync */ 10392 tlv = ptr; 10393 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10394 10395 ptr += sizeof(*tlv); 10396 10397 /* ipv6 sync */ 10398 tlv = ptr; 10399 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10400 10401 ptr += sizeof(*tlv); 10402 10403 /* magic */ 10404 tlv = ptr; 10405 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10406 10407 ptr += sizeof(*tlv); 10408 10409 /* pattern info timeout */ 10410 tlv = ptr; 10411 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 10412 10413 ptr += sizeof(*tlv); 10414 10415 /* ratelimit interval */ 10416 tlv = ptr; 10417 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); 10418 10419 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n", 10420 vdev_id, pattern_id, pattern_offset, pattern_len); 10421 10422 ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ", 10423 bitmap->patternbuf, pattern_len); 10424 ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ", 10425 bitmap->bitmaskbuf, pattern_len); 10426 10427 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); 10428 } 10429 10430 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id) 10431 { 10432 struct wmi_wow_del_pattern_cmd *cmd; 10433 struct sk_buff *skb; 10434 size_t len; 10435 10436 len = sizeof(*cmd); 10437 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10438 if (!skb) 10439 return -ENOMEM; 10440 10441 cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; 10442 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD, 10443 sizeof(*cmd)); 10444 cmd->vdev_id = cpu_to_le32(vdev_id); 10445 cmd->pattern_id = cpu_to_le32(pattern_id); 10446 cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); 10447 10448 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", 10449 vdev_id, pattern_id); 10450 10451 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); 10452 } 10453 10454 static struct sk_buff * 10455 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id, 10456 struct wmi_pno_scan_req_arg *pno) 10457 { 10458 struct nlo_configured_params *nlo_list; 10459 size_t len, nlo_list_len, channel_list_len; 10460 struct wmi_wow_nlo_config_cmd *cmd; 10461 __le32 *channel_list; 10462 struct wmi_tlv *tlv; 10463 struct sk_buff *skb; 10464 void *ptr; 10465 u32 i; 10466 10467 len = sizeof(*cmd) + 10468 sizeof(*tlv) + 10469 /* TLV place holder for array of structures 10470 * 
nlo_configured_params(nlo_list) 10471 */ 10472 sizeof(*tlv); 10473 /* TLV place holder for array of uint32 channel_list */ 10474 10475 channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; 10476 len += channel_list_len; 10477 10478 nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; 10479 len += nlo_list_len; 10480 10481 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10482 if (!skb) 10483 return ERR_PTR(-ENOMEM); 10484 10485 ptr = skb->data; 10486 cmd = ptr; 10487 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd)); 10488 10489 cmd->vdev_id = cpu_to_le32(pno->vdev_id); 10490 cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); 10491 10492 /* current FW does not support min-max range for dwell time */ 10493 cmd->active_dwell_time = cpu_to_le32(pno->active_max_time); 10494 cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time); 10495 10496 if (pno->do_passive_scan) 10497 cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); 10498 10499 cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period); 10500 cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period); 10501 cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles); 10502 cmd->delay_start_time = cpu_to_le32(pno->delay_start_time); 10503 10504 if (pno->enable_pno_scan_randomization) { 10505 cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | 10506 WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); 10507 ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); 10508 ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); 10509 } 10510 10511 ptr += sizeof(*cmd); 10512 10513 /* nlo_configured_params(nlo_list) */ 10514 cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count); 10515 tlv = ptr; 10516 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len); 10517 10518 ptr += sizeof(*tlv); 10519 nlo_list = ptr; 10520 for (i = 0; i < pno->uc_networks_count; i++) { 10521 tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); 10522 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, 10523 sizeof(*nlo_list)); 10524 10525 nlo_list[i].ssid.valid = cpu_to_le32(1); 10526 nlo_list[i].ssid.ssid.ssid_len = 10527 cpu_to_le32(pno->a_networks[i].ssid.ssid_len); 10528 memcpy(nlo_list[i].ssid.ssid.ssid, 10529 pno->a_networks[i].ssid.ssid, 10530 le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); 10531 10532 if (pno->a_networks[i].rssi_threshold && 10533 pno->a_networks[i].rssi_threshold > -300) { 10534 nlo_list[i].rssi_cond.valid = cpu_to_le32(1); 10535 nlo_list[i].rssi_cond.rssi = 10536 cpu_to_le32(pno->a_networks[i].rssi_threshold); 10537 } 10538 10539 nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1); 10540 nlo_list[i].bcast_nw_type.bcast_nw_type = 10541 cpu_to_le32(pno->a_networks[i].bcast_nw_type); 10542 } 10543 10544 ptr += nlo_list_len; 10545 cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count); 10546 tlv = ptr; 10547 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len); 10548 ptr += sizeof(*tlv); 10549 channel_list = ptr; 10550 10551 for (i = 0; i < pno->a_networks[0].channel_count; i++) 10552 channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]); 10553 10554 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", 10555 vdev_id); 10556 10557 return skb; 10558 } 10559 10560 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar, 10561 u32 vdev_id) 10562 { 10563 struct wmi_wow_nlo_config_cmd *cmd; 10564 struct sk_buff *skb; 10565 size_t len; 10566 10567 len = 
sizeof(*cmd); 10568 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10569 if (!skb) 10570 return ERR_PTR(-ENOMEM); 10571 10572 cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; 10573 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len); 10574 10575 cmd->vdev_id = cpu_to_le32(vdev_id); 10576 cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP); 10577 10578 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10579 "wmi tlv stop pno config vdev_id %d\n", vdev_id); 10580 return skb; 10581 } 10582 10583 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, 10584 struct wmi_pno_scan_req_arg *pno_scan) 10585 { 10586 struct sk_buff *skb; 10587 10588 if (pno_scan->enable) 10589 skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); 10590 else 10591 skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id); 10592 10593 if (IS_ERR_OR_NULL(skb)) 10594 return -ENOMEM; 10595 10596 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); 10597 } 10598 10599 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar, 10600 struct wmi_arp_ns_offload_arg *offload, 10601 void **ptr, 10602 bool enable, 10603 bool ext) 10604 { 10605 struct wmi_ns_offload_params *ns; 10606 struct wmi_tlv *tlv; 10607 void *buf_ptr = *ptr; 10608 u32 ns_cnt, ns_ext_tuples; 10609 int i, max_offloads; 10610 10611 ns_cnt = offload->ipv6_count; 10612 10613 tlv = buf_ptr; 10614 10615 if (ext) { 10616 ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; 10617 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10618 ns_ext_tuples * sizeof(*ns)); 10619 i = WMI_MAX_NS_OFFLOADS; 10620 max_offloads = offload->ipv6_count; 10621 } else { 10622 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10623 WMI_MAX_NS_OFFLOADS * sizeof(*ns)); 10624 i = 0; 10625 max_offloads = WMI_MAX_NS_OFFLOADS; 10626 } 10627 10628 buf_ptr += sizeof(*tlv); 10629 10630 for (; i < max_offloads; i++) { 10631 ns = buf_ptr; 10632 ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE, 10633 sizeof(*ns)); 10634 10635 if (enable) { 10636 if (i < ns_cnt) 10637 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID); 10638 10639 memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); 10640 memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); 10641 10642 if (offload->ipv6_type[i]) 10643 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST); 10644 10645 memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); 10646 10647 if (!is_zero_ether_addr(ns->target_mac.addr)) 10648 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID); 10649 10650 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10651 "wmi index %d ns_solicited %pI6 target %pI6", 10652 i, ns->solicitation_ipaddr, 10653 ns->target_ipaddr[0]); 10654 } 10655 10656 buf_ptr += sizeof(*ns); 10657 } 10658 10659 *ptr = buf_ptr; 10660 } 10661 10662 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, 10663 struct wmi_arp_ns_offload_arg *offload, 10664 void **ptr, 10665 bool enable) 10666 { 10667 struct wmi_arp_offload_params *arp; 10668 struct wmi_tlv *tlv; 10669 void *buf_ptr = *ptr; 10670 int i; 10671 10672 /* fill arp tuple */ 10673 tlv = buf_ptr; 10674 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10675 WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); 10676 buf_ptr += sizeof(*tlv); 10677 10678 for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { 10679 arp = buf_ptr; 10680 arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE, 10681 sizeof(*arp)); 10682 10683 if (enable && i < offload->ipv4_count) { 10684 /* Copy the target ip addr and flags */ 10685 arp->flags = 
cpu_to_le32(WMI_ARPOL_FLAGS_VALID); 10686 memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); 10687 10688 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4", 10689 arp->target_ipaddr); 10690 } 10691 10692 buf_ptr += sizeof(*arp); 10693 } 10694 10695 *ptr = buf_ptr; 10696 } 10697 10698 int ath12k_wmi_arp_ns_offload(struct ath12k *ar, 10699 struct ath12k_link_vif *arvif, 10700 struct wmi_arp_ns_offload_arg *offload, 10701 bool enable) 10702 { 10703 struct wmi_set_arp_ns_offload_cmd *cmd; 10704 struct wmi_tlv *tlv; 10705 struct sk_buff *skb; 10706 void *buf_ptr; 10707 size_t len; 10708 u8 ns_cnt, ns_ext_tuples = 0; 10709 10710 ns_cnt = offload->ipv6_count; 10711 10712 len = sizeof(*cmd) + 10713 sizeof(*tlv) + 10714 WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) + 10715 sizeof(*tlv) + 10716 WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params); 10717 10718 if (ns_cnt > WMI_MAX_NS_OFFLOADS) { 10719 ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; 10720 len += sizeof(*tlv) + 10721 ns_ext_tuples * sizeof(struct wmi_ns_offload_params); 10722 } 10723 10724 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10725 if (!skb) 10726 return -ENOMEM; 10727 10728 buf_ptr = skb->data; 10729 cmd = buf_ptr; 10730 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD, 10731 sizeof(*cmd)); 10732 cmd->flags = cpu_to_le32(0); 10733 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10734 cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples); 10735 10736 buf_ptr += sizeof(*cmd); 10737 10738 ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); 10739 ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); 10740 10741 if (ns_ext_tuples) 10742 ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); 10743 10744 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); 10745 } 10746 10747 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, 10748 struct ath12k_link_vif *arvif, bool enable) 10749 { 10750 struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; 10751 struct wmi_gtk_rekey_offload_cmd *cmd; 10752 struct sk_buff *skb; 10753 __le64 replay_ctr; 10754 int len; 10755 10756 len = sizeof(*cmd); 10757 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10758 if (!skb) 10759 return -ENOMEM; 10760 10761 cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; 10762 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); 10763 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10764 10765 if (enable) { 10766 cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE); 10767 10768 /* the length in rekey_data and cmd is equal */ 10769 memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); 10770 memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); 10771 10772 replay_ctr = cpu_to_le64(rekey_data->replay_ctr); 10773 memcpy(cmd->replay_ctr, &replay_ctr, 10774 sizeof(replay_ctr)); 10775 } else { 10776 cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE); 10777 } 10778 10779 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", 10780 arvif->vdev_id, enable); 10781 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); 10782 } 10783 10784 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, 10785 struct ath12k_link_vif *arvif) 10786 { 10787 struct wmi_gtk_rekey_offload_cmd *cmd; 10788 struct sk_buff *skb; 10789 int len; 10790 10791 len = sizeof(*cmd); 10792 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10793 if (!skb) 10794 return -ENOMEM; 10795 10796 cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; 10797 
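	/* GTK_OFFLOAD_REQUEST_STATUS_OPCODE (set below) asks the firmware to
	 * report the current replay counter; the reply arrives as
	 * WMI_GTK_OFFLOAD_STATUS_EVENTID and is handled by
	 * ath12k_wmi_gtk_offload_status_event().
	 */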
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); 10798 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10799 cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE); 10800 10801 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n", 10802 arvif->vdev_id); 10803 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); 10804 } 10805 10806 int ath12k_wmi_sta_keepalive(struct ath12k *ar, 10807 const struct wmi_sta_keepalive_arg *arg) 10808 { 10809 struct wmi_sta_keepalive_arp_resp_params *arp; 10810 struct ath12k_wmi_pdev *wmi = ar->wmi; 10811 struct wmi_sta_keepalive_cmd *cmd; 10812 struct sk_buff *skb; 10813 size_t len; 10814 10815 len = sizeof(*cmd) + sizeof(*arp); 10816 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 10817 if (!skb) 10818 return -ENOMEM; 10819 10820 cmd = (struct wmi_sta_keepalive_cmd *)skb->data; 10821 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd)); 10822 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 10823 cmd->enabled = cpu_to_le32(arg->enabled); 10824 cmd->interval = cpu_to_le32(arg->interval); 10825 cmd->method = cpu_to_le32(arg->method); 10826 10827 arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1); 10828 arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE, 10829 sizeof(*arp)); 10830 if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || 10831 arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { 10832 arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr); 10833 arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr); 10834 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); 10835 } 10836 10837 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10838 "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", 10839 arg->vdev_id, arg->enabled, arg->method, arg->interval); 10840 10841 return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); 10842 } 10843 10844 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params) 10845 { 10846 struct wmi_mlo_setup_cmd *cmd; 10847 struct ath12k_wmi_pdev *wmi = ar->wmi; 10848 u32 *partner_links, num_links; 10849 int i, ret, buf_len, arg_len; 10850 struct sk_buff *skb; 10851 struct wmi_tlv *tlv; 10852 void *ptr; 10853 10854 num_links = mlo_params->num_partner_links; 10855 arg_len = num_links * sizeof(u32); 10856 buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len; 10857 10858 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10859 if (!skb) 10860 return -ENOMEM; 10861 10862 cmd = (struct wmi_mlo_setup_cmd *)skb->data; 10863 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD, 10864 sizeof(*cmd)); 10865 cmd->mld_group_id = mlo_params->group_id; 10866 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 10867 ptr = skb->data + sizeof(*cmd); 10868 10869 tlv = ptr; 10870 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 10871 ptr += TLV_HDR_SIZE; 10872 10873 partner_links = ptr; 10874 for (i = 0; i < num_links; i++) 10875 partner_links[i] = mlo_params->partner_link_id[i]; 10876 10877 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID); 10878 if (ret) { 10879 ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n", 10880 ret); 10881 dev_kfree_skb(skb); 10882 return ret; 10883 } 10884 10885 return 0; 10886 } 10887 10888 int ath12k_wmi_mlo_ready(struct ath12k *ar) 10889 { 10890 struct wmi_mlo_ready_cmd *cmd; 10891 struct ath12k_wmi_pdev *wmi = ar->wmi; 10892 struct sk_buff *skb; 10893 int ret, len; 10894 10895 len = sizeof(*cmd); 10896 skb = 
int ath12k_wmi_mlo_ready(struct ath12k *ar)
{
	struct wmi_mlo_ready_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

int ath12k_wmi_mlo_teardown(struct ath12k *ar)
{
	struct wmi_mlo_teardown_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
{
	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
}

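/*
 * The TPC power command below is laid out as the fixed command TLV
 * followed by a WMI_TAG_ARRAY_STRUCT holding one
 * wmi_vdev_ch_power_params entry per regulatory power level:
 *
 *	| wmi_vdev_set_tpc_power_cmd | tlv hdr | ch[0] | ch[1] | ... |
 *
 * where each ch[i] carries a channel center frequency and the TX power
 * allowed for that level.
 */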
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
				       u32 vdev_id,
				       struct ath12k_reg_tpc_power_info *param)
{
	struct wmi_vdev_set_tpc_power_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_ch_power_params *ch;
	int i, ret, len, array_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u8 *ptr;

	array_len = sizeof(*ch) * param->num_pwr_levels;
	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->psd_power = cpu_to_le32(param->is_psd_power);
	cmd->eirp_power = cpu_to_le32(param->eirp_power);
	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);

	ptr += sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);

	ptr += TLV_HDR_SIZE;
	ch = (struct wmi_vdev_ch_power_params *)ptr;

	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
							sizeof(*ch));
		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   le32_to_cpu(ch->chan_cfreq), le32_to_cpu(ch->tx_power));
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
	u8 i;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %u",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	dislw_bmap_arg = &arg->disallow_bmap[0];
	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
		dislw_bmap->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
			cpu_to_le32(dislw_bmap_arg->disallowed_mode);
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   i, dislw_bmap_arg->disallowed_mode,
			   dislw_bmap_arg->ieee_link_id_comb);
		dislw_bmap++;
		dislw_bmap_arg++;
	}

	return 0;
}

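/*
 * The link-set-active command carries a fixed sequence of arrays after
 * the fixed command TLV; per the buffer construction below that order
 * is:
 *
 *	1. link number params      (WMI_TAG_ARRAY_STRUCT)
 *	2. vdev bitmap             (WMI_TAG_ARRAY_UINT32)
 *	3. inactive vdev bitmap    (WMI_TAG_ARRAY_UINT32)
 *	4. ieee_link_id bitmap     (WMI_TAG_ARRAY_UINT32)
 *	5. ieee_link_id bitmap2    (WMI_TAG_ARRAY_UINT32)
 *	6. disallowed mode bitmaps (WMI_TAG_ARRAY_STRUCT)
 *
 * Arrays that do not apply to the chosen force_mode are still emitted
 * as zero-length TLVs, which keeps the remaining arrays at the
 * positions the firmware expects.
 */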
int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
					    struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
	struct wmi_mlo_set_active_link_number_params *link_num_param;
	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_mlo_link_set_active_cmd *cmd;
	u32 num_inactive_vdev_bitmap = 0;
	u32 num_disallow_mode_comb = 0;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *vdev_bitmap;
	void *buf_ptr;
	int i, ret;
	u32 len;

	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
		return -EINVAL;
	}

	switch (arg->force_mode) {
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
		num_link_num_param = arg->num_link_entry;
		fallthrough;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		break;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
		break;
	default:
		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
		return -EINVAL;
	}

	num_disallow_mode_comb = arg->num_disallow_mode_comb;
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
						 sizeof(*cmd));
	cmd->force_mode = cpu_to_le32(arg->force_mode);
	cmd->reason = cpu_to_le32(arg->reason);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
		   arg->force_mode, arg->reason, num_link_num_param,
		   num_vdev_bitmap, num_inactive_vdev_bitmap,
		   num_disallow_mode_comb);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*link_num_param) * num_link_num_param);
	buf_ptr += TLV_HDR_SIZE;

	if (num_link_num_param) {
		cmd->ctrl_flags =
			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
					 CRTL_F_DYNC_FORCE_LINK_NUM);

		link_num_param = buf_ptr;
		for (i = 0; i < num_link_num_param; i++) {
			link_num_param->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
			link_num_param->num_of_link =
				cpu_to_le32(arg->link_num[i].num_of_link);
			link_num_param->vdev_type =
				cpu_to_le32(arg->link_num[i].vdev_type);
			link_num_param->vdev_subtype =
				cpu_to_le32(arg->link_num[i].vdev_subtype);
			link_num_param->home_freq =
				cpu_to_le32(arg->link_num[i].home_freq);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
				   i, arg->link_num[i].num_of_link,
				   arg->link_num[i].vdev_type,
				   arg->link_num[i].vdev_subtype,
				   arg->link_num[i].home_freq,
				   __le32_to_cpu(cmd->ctrl_flags));
			link_num_param++;
		}

		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
	}

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
	buf_ptr += TLV_HDR_SIZE;

	if (num_vdev_bitmap) {
		vdev_bitmap = buf_ptr;
		for (i = 0; i < num_vdev_bitmap; i++) {
			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
				   i, arg->vdev_bitmap[i]);
		}

		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
	}

	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
						 sizeof(*vdev_bitmap) *
						 num_inactive_vdev_bitmap);
		buf_ptr += TLV_HDR_SIZE;

		if (num_inactive_vdev_bitmap) {
			vdev_bitmap = buf_ptr;
			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
				vdev_bitmap[i] =
					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "entry %d inactive_vdev_id_bitmap 0x%x",
					   i, arg->inactive_vdev_bitmap[i]);
			}

			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
		}
	} else {
		/* add empty vdev bitmap2 tlv */
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
		buf_ptr += TLV_HDR_SIZE;
	}

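	/*
	 * The two ieee_link_id bitmaps are never populated by this
	 * function; they are emitted below as zero-length arrays only to
	 * keep the TLV stream in the fixed order described above.
	 */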
	/* add empty ieee_link_id_bitmap tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	/* add empty ieee_link_id_bitmap2 tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*disallowed_mode_bmap) *
					 arg->num_disallow_mode_comb);
	buf_ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
	if (ret)
		goto free_skb;

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
		goto free_skb;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");

	return ret;

free_skb:
	dev_kfree_skb(skb);
	return ret;
}