// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};
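
/* Minimum-length policy for received TLVs. ath12k_wmi_tlv_iter() consults
 * this table and rejects any TLV whose payload is shorter than the min_len
 * registered for its tag, so event handlers can dereference the fixed part
 * of an event without re-checking its size.
 */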

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};

static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
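
/* Every TLV begins with a single little-endian u32 carrying both tag and
 * payload length. For example, ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 64)
 * encodes the tag into the WMI_TLV_TAG field and 64 into WMI_TLV_LEN.
 * ath12k_wmi_tlv_cmd_hdr() is the convenience variant for callers that pass
 * sizeof(*cmd), i.e. a length that still includes the TLV header itself.
 */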

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;

	if (ab->num_radios == 2) {
		config->num_peers = TARGET_NUM_PEERS(DBS);
		config->num_tids = TARGET_NUM_TIDS(DBS);
	} else if (ab->num_radios == 3) {
		config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
		config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
	} else {
		/* Control should not reach here */
		config->num_peers = TARGET_NUM_PEERS(SINGLE);
		config->num_tids = TARGET_NUM_TIDS(SINGLE);
	}
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host supports peer map v3 and unmap v2 support */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
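
/* PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,
 * i.e. the table maps each firmware hw mode enum to the host-side priority
 * constant of the same name with a _PRI suffix.
 */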

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
			   size_t len, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}
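
/* Typical caller pattern (sketch) for events with one TLV per tag: parse the
 * skb into a tag-indexed table, pick out the TLVs of interest and free the
 * table when done:
 *
 *	const void **tb;
 *
 *	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *	ev = tb[WMI_TAG_READY_EVENT];
 *	...
 *	kfree(tb);
 */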

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_sc->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}
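
/* ath12k_pull_mac_phy_cap_svc_ready_ext() below resolves the (hw_mode_id,
 * phy_id) pair advertised by firmware into an index into the flat
 * mac_phy_caps array: phy_idx accumulates fls() of the phy_id_map of every
 * hw mode listed before the requested one, then phy_id is added on top.
 */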

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
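
	/* Example: a mac whose physical chains are 4-7 reports tx_chain_mask
	 * 0xf0, giving a shift of 4, so the HT/VHT rate sets advertised to
	 * mac80211 are expressed in terms of chains 0-3.
	 */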

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
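
/* Example with WMI_SERVICE_BITS_IN_SIZE32 == 4 (b0-b3, per the comment
 * above): word 0 carries services 0-3, word 1 carries services 4-7, and so
 * on; the upper bits of each word are never examined by the loop.
 */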

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_sc->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
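
/* WMI buffers are sized to a 4-byte multiple and reserve WMI_SKB_HEADROOM so
 * that ath12k_wmi_cmd_send_nowait() can later skb_push() the wmi_cmd_hdr
 * without reallocating; a warning is logged if the resulting data pointer is
 * not 4-byte aligned.
 */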

int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = 0;
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
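
/* Error handling convention for all command senders in this file: on a
 * successful submit HTC owns the skb, so the sender frees it only when
 * ath12k_wmi_cmd_send() fails.
 */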

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams =
		args->chains[NL80211_BAND_2GHZ].tx;
	txrx_streams->supported_rx_streams =
		args->chains[NL80211_BAND_2GHZ].rx;

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams =
		args->chains[NL80211_BAND_5GHZ].tx;
	txrx_streams->supported_rx_streams =
		args->chains[NL80211_BAND_5GHZ].rx;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	else
		chan->band_center_freq2 = 0;

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}
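
/* Example: for an HE-capable DFS channel the caller sets allow_ht,
 * allow_vht, allow_he and chan_radar, so chan->info carries the mode bits
 * plus WMI_CHAN_INFO_ALLOW_HT | WMI_CHAN_INFO_ALLOW_VHT |
 * WMI_CHAN_INFO_ALLOW_HE | WMI_CHAN_INFO_DFS.
 */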

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}
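
/* Start and restart share one request layout: the SSID, hidden-SSID and PMF
 * bits are programmed only on a fresh start, while the channel TLV, the
 * (empty) NoA descriptor array and the LDPC RX flag are sent in both cases.
 */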
"restart" : "start"); 1043 dev_kfree_skb(skb); 1044 } 1045 1046 return ret; 1047 } 1048 1049 int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 1050 { 1051 struct ath12k_wmi_pdev *wmi = ar->wmi; 1052 struct wmi_vdev_up_cmd *cmd; 1053 struct sk_buff *skb; 1054 int ret; 1055 1056 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1057 if (!skb) 1058 return -ENOMEM; 1059 1060 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1061 1062 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, 1063 sizeof(*cmd)); 1064 cmd->vdev_id = cpu_to_le32(vdev_id); 1065 cmd->vdev_assoc_id = cpu_to_le32(aid); 1066 1067 ether_addr_copy(cmd->vdev_bssid.addr, bssid); 1068 1069 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1070 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1071 vdev_id, aid, bssid); 1072 1073 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1074 if (ret) { 1075 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1076 dev_kfree_skb(skb); 1077 } 1078 1079 return ret; 1080 } 1081 1082 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, 1083 struct ath12k_wmi_peer_create_arg *arg) 1084 { 1085 struct ath12k_wmi_pdev *wmi = ar->wmi; 1086 struct wmi_peer_create_cmd *cmd; 1087 struct sk_buff *skb; 1088 int ret; 1089 1090 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1091 if (!skb) 1092 return -ENOMEM; 1093 1094 cmd = (struct wmi_peer_create_cmd *)skb->data; 1095 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD, 1096 sizeof(*cmd)); 1097 1098 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr); 1099 cmd->peer_type = cpu_to_le32(arg->peer_type); 1100 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1101 1102 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1103 "WMI peer create vdev_id %d peer_addr %pM\n", 1104 arg->vdev_id, arg->peer_addr); 1105 1106 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1107 if (ret) { 1108 ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); 1109 dev_kfree_skb(skb); 1110 } 1111 1112 return ret; 1113 } 1114 1115 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, 1116 const u8 *peer_addr, u8 vdev_id) 1117 { 1118 struct ath12k_wmi_pdev *wmi = ar->wmi; 1119 struct wmi_peer_delete_cmd *cmd; 1120 struct sk_buff *skb; 1121 int ret; 1122 1123 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1124 if (!skb) 1125 return -ENOMEM; 1126 1127 cmd = (struct wmi_peer_delete_cmd *)skb->data; 1128 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD, 1129 sizeof(*cmd)); 1130 1131 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1132 cmd->vdev_id = cpu_to_le32(vdev_id); 1133 1134 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1135 "WMI peer delete vdev_id %d peer_addr %pM\n", 1136 vdev_id, peer_addr); 1137 1138 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); 1139 if (ret) { 1140 ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); 1141 dev_kfree_skb(skb); 1142 } 1143 1144 return ret; 1145 } 1146 1147 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar, 1148 struct ath12k_wmi_pdev_set_regdomain_arg *arg) 1149 { 1150 struct ath12k_wmi_pdev *wmi = ar->wmi; 1151 struct wmi_pdev_set_regdomain_cmd *cmd; 1152 struct sk_buff *skb; 1153 int ret; 1154 1155 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1156 if (!skb) 1157 return -ENOMEM; 1158 1159 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 1160 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD, 1161 sizeof(*cmd)); 1162 1163 cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use); 1164 cmd->reg_domain_2g = 

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
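
/* paddr is the DMA address of the peer's REO queue descriptor; firmware
 * receives it split across queue_ptr_lo/hi, and the TID is reused as the
 * hardware queue number.
 */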

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* TODO FW Support for the cmd is not available yet.
 * Can be tested once the command and corresponding
 * event is implemented in FW
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = cpu_to_le32(arg->param);
	cmd->value = cpu_to_le32(arg->value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}
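
/* ath12k_wmi_force_fw_hang_cmd() above is intended for recovery testing:
 * 'type' selects the simulated firmware failure and delay_time_ms lets the
 * target defer triggering it.
 */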

int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_get_pdev_temperature_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
	cmd->buf_len = cpu_to_le32(bcn->len);

	ptr = skb->data + sizeof(*cmd);

	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_install_key(struct ath12k *ar,
				struct wmi_vdev_install_key_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret, len, key_len_aligned;

	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
	 * length is specified in cmd->key_len.
	 */
	key_len_aligned = roundup(arg->key_len, 4);

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = cpu_to_le32(arg->key_idx);
	cmd->key_flags = cpu_to_le32(arg->key_flags);
	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
	cmd->key_len = cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

	if (arg->key_rsc_counter)
		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
	memcpy(tlv->value, arg->key_data, arg->key_len);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
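
/* Example: a 16-byte CCMP key is already 4-byte aligned, so
 * key_len_aligned == key_len == 16; a 5-byte WEP-40 key gets an 8-byte TLV
 * while cmd->key_len still carries 5, so firmware ignores the padding.
 */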
static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
				       struct ath12k_wmi_peer_assoc_arg *arg,
				       bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;

	if (arg->is_wme_set) {
		if (arg->qos_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
		if (arg->apsd_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
		if (arg->ht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
		if (arg->bw_40)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
		if (arg->bw_80)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
		if (arg->bw_160)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->stbc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->ldpc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);

		if (arg->static_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
		if (arg->dynamic_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
		if (arg->spatial_mux_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
		if (arg->vht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
		if (arg->he_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
		if (arg->twt_requester)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
		if (arg->twt_responder)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (arg->auth_flag)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
	if (arg->need_ptk_4_way) {
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
		if (!hw_crypto_disabled)
			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
	}
	if (arg->need_gtk_2_way)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
	/* safe mode bypass the 4-way handshake */
	if (arg->safe_mode_enabled)
		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
						 WMI_PEER_NEED_GTK_2_WAY));

	if (arg->is_pmf_enabled)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (arg->amsdu_disable) Add after FW support
	 */

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 */
	if (arg->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}
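/* WMI_PEER_ASSOC_CMDID buffer layout: the fixed command, a byte-array TLV
 * of legacy rates, a byte-array TLV of HT rates (both 4-byte aligned), one
 * VHT rate-set TLV and an array-of-struct TLV holding peer_he_mcs_count
 * HE rate sets.
 */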
int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct ath12k_wmi_vht_rate_set_params *mcs;
	struct ath12k_wmi_he_rate_set_params *he_mcs;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 peer_legacy_rates_align;
	u32 peer_ht_rates_align;
	int i, ret, len;

	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
					  sizeof(u32));
	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
				      sizeof(u32));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * arg->peer_he_mcs_count);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
	cmd->peer_associd = cpu_to_le32(arg->peer_associd);

	ath12k_wmi_copy_peer_flags(cmd, arg,
				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);

	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
		cmd->peer_he_cap_phy[i] =
			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(ptr, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(ptr, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
						 sizeof(*mcs));

	cmd->peer_nss = cpu_to_le32(arg->peer_nss);

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);

	if (arg->vht_capable) {
		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
	}

	/* HE Rates */
	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);

	ptr += sizeof(*mcs);

	len = arg->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < arg->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							    sizeof(*he_mcs));

		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
		ptr += sizeof(*he_mcs);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_ASSOC_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

void ath12k_wmi_start_scan_init(struct ath12k *ar,
				struct ath12k_wmi_scan_req_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_active_2g = 0;
	arg->dwell_time_passive = 150;
	arg->dwell_time_active_6g = 40;
	arg->dwell_time_passive_6g = 30;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
				  WMI_SCAN_EVENT_COMPLETED |
				  WMI_SCAN_EVENT_BSS_CHANNEL |
				  WMI_SCAN_EVENT_FOREIGN_CHAN |
				  WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->num_bssid = 1;

	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
	 * ZEROs in probe request
	 */
	eth_broadcast_addr(arg->bssid_list[0].addr);
}
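/* Fold the per-event and per-flag request bits of @arg into the
 * notify_scan_events and scan_ctrl_flags words of the start scan command,
 * including the adaptive dwell time mode field.
 */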
static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
						   struct ath12k_wmi_scan_req_arg *arg)
{
	/* Scan events subscription */
	if (arg->scan_ev_started)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
	if (arg->scan_ev_completed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
	if (arg->scan_ev_bss_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
	if (arg->scan_ev_foreign_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
	if (arg->scan_ev_dequeued)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
	if (arg->scan_ev_preempted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
	if (arg->scan_ev_start_failed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
	if (arg->scan_ev_restarted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
	if (arg->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
	if (arg->scan_ev_suspended)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
	if (arg->scan_ev_resumed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);

	/* Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (arg->scan_f_passive)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
	if (arg->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
	if (arg->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
	if (arg->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
	if (arg->scan_f_half_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
	if (arg->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
	if (arg->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
	if (arg->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
	if (arg->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
	if (arg->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
	if (arg->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
	if (arg->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
	if (arg->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
	if (arg->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
	if (arg->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
	if (arg->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
	if (arg->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
						 WMI_SCAN_DWELL_MODE_MASK);
}
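/* The start scan command is variable length: after the fixed TLV it carries
 * a u32 channel list, an SSID array, a BSSID array and the extra IEs padded
 * to a 4-byte boundary, each with its own TLV header even when empty,
 * followed by optional short-SSID and BSSID hint arrays.
 */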
int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
				   struct ath12k_wmi_scan_req_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_start_scan_cmd *cmd;
	struct ath12k_wmi_ssid_params *ssid = NULL;
	struct ath12k_wmi_mac_addr_params *bssid;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u32 *tmp_ptr, extraie_len_with_pad = 0;
	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;

	len = sizeof(*cmd);

	len += TLV_HDR_SIZE;
	if (arg->num_chan)
		len += arg->num_chan * sizeof(u32);

	len += TLV_HDR_SIZE;
	if (arg->num_ssids)
		len += arg->num_ssids * sizeof(*ssid);

	len += TLV_HDR_SIZE;
	if (arg->num_bssid)
		len += sizeof(*bssid) * arg->num_bssid;

	len += TLV_HDR_SIZE;
	if (arg->extraie.len)
		extraie_len_with_pad =
			roundup(arg->extraie.len, sizeof(u32));
	len += extraie_len_with_pad;

	if (arg->num_hint_bssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_bssid * sizeof(*hint_bssid);

	if (arg->num_hint_s_ssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_s_ssid * sizeof(*s_ssid);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
						 sizeof(*cmd));

	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);

	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);

	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
	cmd->num_chan = cpu_to_le32(arg->num_chan);
	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
	cmd->ie_len = cpu_to_le32(arg->extraie.len);
	cmd->n_probes = cpu_to_le32(arg->n_probes);

	ptr += sizeof(*cmd);

	len = arg->num_chan * sizeof(u32);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
	ptr += TLV_HDR_SIZE;
	tmp_ptr = (u32 *)ptr;

	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);

	ptr += len;

	len = arg->num_ssids * sizeof(*ssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	if (arg->num_ssids) {
		ssid = ptr;
		for (i = 0; i < arg->num_ssids; ++i) {
			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
			memcpy(ssid->ssid, arg->ssid[i].ssid,
			       arg->ssid[i].ssid_len);
			ssid++;
		}
	}

	ptr += (arg->num_ssids * sizeof(*ssid));
	len = arg->num_bssid * sizeof(*bssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	bssid = ptr;

	if (arg->num_bssid) {
		for (i = 0; i < arg->num_bssid; ++i) {
			ether_addr_copy(bssid->addr,
					arg->bssid_list[i].addr);
			bssid++;
		}
	}

	ptr += arg->num_bssid * sizeof(*bssid);

	len = extraie_len_with_pad;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
	ptr += TLV_HDR_SIZE;

	if (arg->extraie.len)
		memcpy(ptr, arg->extraie.ptr,
		       arg->extraie.len);

	ptr += extraie_len_with_pad;

	if (arg->num_hint_s_ssid) {
		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		s_ssid = ptr;
		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
			s_ssid++;
		}
		ptr += len;
	}

	if (arg->num_hint_bssid) {
		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		hint_bssid = ptr;
		for (i = 0; i < arg->num_hint_bssid; ++i) {
			hint_bssid->freq_flags =
				arg->hint_bssid[i].freq_flags;
			ether_addr_copy(&hint_bssid->bssid.addr[0],
					&arg->hint_bssid[i].bssid.addr[0]);
			hint_bssid++;
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_START_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
				  struct ath12k_wmi_scan_cancel_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->requestor = cpu_to_le32(arg->requester);
	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	/* stop the scan with the corresponding scan_id */
	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
	} else {
		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
			    arg->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
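/* The channel list may exceed the maximum WMI message size, so it is sent
 * in chunks: each iteration carries as many ath12k_wmi_channel_params as
 * max_msg_len allows, and follow-up chunks set
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so firmware appends to rather than
 * replaces the list.
 */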
"failed to send WMI_STOP_SCAN_CMDID\n"); 2355 dev_kfree_skb(skb); 2356 } 2357 2358 return ret; 2359 } 2360 2361 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, 2362 struct ath12k_wmi_scan_chan_list_arg *arg) 2363 { 2364 struct ath12k_wmi_pdev *wmi = ar->wmi; 2365 struct wmi_scan_chan_list_cmd *cmd; 2366 struct sk_buff *skb; 2367 struct ath12k_wmi_channel_params *chan_info; 2368 struct ath12k_wmi_channel_arg *channel_arg; 2369 struct wmi_tlv *tlv; 2370 void *ptr; 2371 int i, ret, len; 2372 u16 num_send_chans, num_sends = 0, max_chan_limit = 0; 2373 __le32 *reg1, *reg2; 2374 2375 channel_arg = &arg->channel[0]; 2376 while (arg->nallchans) { 2377 len = sizeof(*cmd) + TLV_HDR_SIZE; 2378 max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / 2379 sizeof(*chan_info); 2380 2381 num_send_chans = min(arg->nallchans, max_chan_limit); 2382 2383 arg->nallchans -= num_send_chans; 2384 len += sizeof(*chan_info) * num_send_chans; 2385 2386 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2387 if (!skb) 2388 return -ENOMEM; 2389 2390 cmd = (struct wmi_scan_chan_list_cmd *)skb->data; 2391 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD, 2392 sizeof(*cmd)); 2393 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 2394 cmd->num_scan_chans = cpu_to_le32(num_send_chans); 2395 if (num_sends) 2396 cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG); 2397 2398 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2399 "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", 2400 num_send_chans, len, cmd->pdev_id, num_sends); 2401 2402 ptr = skb->data + sizeof(*cmd); 2403 2404 len = sizeof(*chan_info) * num_send_chans; 2405 tlv = ptr; 2406 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT, 2407 len); 2408 ptr += TLV_HDR_SIZE; 2409 2410 for (i = 0; i < num_send_chans; ++i) { 2411 chan_info = ptr; 2412 memset(chan_info, 0, sizeof(*chan_info)); 2413 len = sizeof(*chan_info); 2414 chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, 2415 len); 2416 2417 reg1 = &chan_info->reg_info_1; 2418 reg2 = &chan_info->reg_info_2; 2419 chan_info->mhz = cpu_to_le32(channel_arg->mhz); 2420 chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1); 2421 chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2); 2422 2423 if (channel_arg->is_chan_passive) 2424 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); 2425 if (channel_arg->allow_he) 2426 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); 2427 else if (channel_arg->allow_vht) 2428 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); 2429 else if (channel_arg->allow_ht) 2430 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); 2431 if (channel_arg->half_rate) 2432 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE); 2433 if (channel_arg->quarter_rate) 2434 chan_info->info |= 2435 cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE); 2436 2437 if (channel_arg->psc_channel) 2438 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC); 2439 2440 if (channel_arg->dfs_set) 2441 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS); 2442 2443 chan_info->info |= le32_encode_bits(channel_arg->phy_mode, 2444 WMI_CHAN_INFO_MODE); 2445 *reg1 |= le32_encode_bits(channel_arg->minpower, 2446 WMI_CHAN_REG_INFO1_MIN_PWR); 2447 *reg1 |= le32_encode_bits(channel_arg->maxpower, 2448 WMI_CHAN_REG_INFO1_MAX_PWR); 2449 *reg1 |= le32_encode_bits(channel_arg->maxregpower, 2450 WMI_CHAN_REG_INFO1_MAX_REG_PWR); 2451 *reg1 |= le32_encode_bits(channel_arg->reg_class_id, 2452 WMI_CHAN_REG_INFO1_REG_CLS); 2453 *reg2 |= 
int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
				   struct wmi_wmm_params_all_arg *param)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->wmm_param_type = 0;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
					       sizeof(*wmm_param));

		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}
	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
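/* BlockAck session helpers: delba teardown, addba response, addba request
 * and addba response clearing are thin wrappers that forward the peer/TID
 * parameters to firmware.
 */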
int
ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
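/* Send the initial regulatory hint to firmware. Exactly one of an alpha2
 * string, a numeric country code or a regdomain id is sent, selected by
 * arg->flags.
 */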
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
				     struct ath12k_wmi_init_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	switch (arg->flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
		cmd->cc_info.country_code =
			cpu_to_le32(arg->cc_info.country_code);
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY cmd: %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
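/* Enable TWT on a pdev. Firmware wants the full set of scheduler tuning
 * knobs up front; the ATH12K_TWT_DEF_* values are the driver defaults.
 * MBSSID TWT is not supported yet (mbss_support = 0).
 */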
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = cpu_to_le32(he_obss_pd->enable);
	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}
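/* Configure BSS color collision detection for a vdev: evt_type switches
 * detection on or off, and firmware checks for collisions against
 * current_bss_color using the given detection and scan periods.
 */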
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
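/* Hand the probe response template to firmware. The layout mirrors the
 * beacon template command: fixed cmd, a zeroed bcn_prb_info TLV and a
 * 4-byte aligned byte-array TLV with the frame.
 */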
int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

static void
ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
			      struct ath12k_wmi_pdev_band_arg *arg)
{
	u8 i;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
	struct ath12k_pdev *pdev;

	for (i = 0; i < soc->num_radios; i++) {
		pdev = &soc->pdevs[i];
		hal_reg_cap = &soc->hal_reg_cap[i];
		arg[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2G_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			break;
		}
	}
}
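/* Convert the host-endian resource configuration computed at init time
 * into the little-endian on-wire ath12k_wmi_resource_config_params TLV.
 */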
static void
ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
						  WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
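/* Assemble WMI_INIT_CMDID: the resource config TLV, an array of host
 * memory chunks (the array TLV header is present even when empty) and,
 * unless the preferred hw mode is WMI_HOST_HW_MODE_MAX, a set-hw-mode TLV
 * followed by one band-to-mac entry per radio.
 */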
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;
	int ret;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
						 sizeof(*cmd));

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);

	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
						 sizeof(*cfg));

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);

	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
					   len);

		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   arg->mem_chunks[idx].req_id,
			   (u64)arg->mem_chunks[idx].paddr,
			   arg->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;

	/* the TLV array header is sent even when num_mem_chunks is zero */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE + len;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
							     sizeof(*hw_mode));

		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);

		ptr += sizeof(*hw_mode);

		len = arg->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
						       len);
			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
			band_to_mac->start_freq =
				cpu_to_le32(arg->band_to_mac[idx].start_freq);
			band_to_mac->end_freq =
				cpu_to_le32(arg->band_to_mac[idx].end_freq);
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
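/* Configure LRO for a pdev. The two hash seed arrays th_4 and th_6 are
 * simply randomized on every call.
 */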
int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
			    int pdev_id)
{
	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
						 sizeof(*cmd));

	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send lro cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
						 sizeof(*cmd));

	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = cpu_to_le32(mode);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	ab->hw_params->wmi_init(ab, &arg.res_cfg);

	arg.num_mem_chunks = wmi_sc->num_mem_chunks;
	arg.hw_mode_id = wmi_sc->preferred_hw_mode;
	arg.mem_chunks = wmi_sc->mem_chunks;

	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg);
}
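/* Push a spectral scan configuration for one vdev; every field of @arg
 * maps one-to-one onto the WMI TLV.
 */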
int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->trigger_cmd = cpu_to_le32(trigger);
	cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}
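/* Describe one host DMA ring (base address, head/tail index addresses,
 * element count and buffer size) to firmware so it can return direct DMA
 * buffers, e.g. spectral scan reports, through DMA ring buffer release
 * events.
 */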
3512 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ, 3513 sizeof(*cmd)); 3514 3515 cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id)); 3516 cmd->module_id = cpu_to_le32(arg->module_id); 3517 cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo); 3518 cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi); 3519 cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo); 3520 cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi); 3521 cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo); 3522 cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi); 3523 cmd->num_elems = cpu_to_le32(arg->num_elems); 3524 cmd->buf_size = cpu_to_le32(arg->buf_size); 3525 cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event); 3526 cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms); 3527 3528 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3529 "WMI DMA ring cfg req cmd pdev_id 0x%x\n", 3530 arg->pdev_id); 3531 3532 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 3533 WMI_PDEV_DMA_RING_CFG_REQ_CMDID); 3534 if (ret) { 3535 ath12k_warn(ar->ab, 3536 "failed to send dma ring cfg req wmi cmd\n"); 3537 goto err; 3538 } 3539 3540 return 0; 3541 err: 3542 dev_kfree_skb(skb); 3543 return ret; 3544 } 3545 3546 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc, 3547 u16 tag, u16 len, 3548 const void *ptr, void *data) 3549 { 3550 struct ath12k_wmi_dma_buf_release_arg *arg = data; 3551 3552 if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY) 3553 return -EPROTO; 3554 3555 if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry)) 3556 return -ENOBUFS; 3557 3558 arg->num_buf_entry++; 3559 return 0; 3560 } 3561 3562 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc, 3563 u16 tag, u16 len, 3564 const void *ptr, void *data) 3565 { 3566 struct ath12k_wmi_dma_buf_release_arg *arg = data; 3567 3568 if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA) 3569 return -EPROTO; 3570 3571 if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry)) 3572 return -ENOBUFS; 3573 3574 arg->num_meta++; 3575 3576 return 0; 3577 } 3578 3579 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab, 3580 u16 tag, u16 len, 3581 const void *ptr, void *data) 3582 { 3583 struct ath12k_wmi_dma_buf_release_arg *arg = data; 3584 const struct ath12k_wmi_dma_buf_release_fixed_params *fixed; 3585 u32 pdev_id; 3586 int ret; 3587 3588 switch (tag) { 3589 case WMI_TAG_DMA_BUF_RELEASE: 3590 fixed = ptr; 3591 arg->fixed = *fixed; 3592 pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id)); 3593 arg->fixed.pdev_id = cpu_to_le32(pdev_id); 3594 break; 3595 case WMI_TAG_ARRAY_STRUCT: 3596 if (!arg->buf_entry_done) { 3597 arg->num_buf_entry = 0; 3598 arg->buf_entry = ptr; 3599 3600 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 3601 ath12k_wmi_dma_buf_entry_parse, 3602 arg); 3603 if (ret) { 3604 ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n", 3605 ret); 3606 return ret; 3607 } 3608 3609 arg->buf_entry_done = true; 3610 } else if (!arg->meta_data_done) { 3611 arg->num_meta = 0; 3612 arg->meta_data = ptr; 3613 3614 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 3615 ath12k_wmi_dma_buf_meta_parse, 3616 arg); 3617 if (ret) { 3618 ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n", 3619 ret); 3620 return ret; 3621 } 3622 3623 arg->meta_data_done = true; 3624 } 3625 break; 3626 default: 3627 break; 3628 } 3629 return 0; 3630 } 3631 3632 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab, 3633 struct sk_buff *skb) 3634 { 3635 struct ath12k_wmi_dma_buf_release_arg arg 
= {};
	struct ath12k_dbring_buf_release_event param;
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_dma_buf_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = arg.fixed;
	param.buf_entry = arg.buf_entry;
	param.num_buf_entry = arg.num_buf_entry;
	param.meta_data = arg.meta_data;
	param.num_meta = arg.num_meta;

	ret = ath12k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}

static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}

static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
		   soc->wmi_ab.preferred_hw_mode);
	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}
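/* The per-phy HAL_REG_CAPABILITIES_EXT TLVs are only counted by the parser
 * below; ath12k_wmi_ext_hal_reg_caps() then pulls one capability entry per
 * phy and stores it in soc->hal_reg_cap[].
 */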
static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}
		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}

static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath12k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
}

static int
ath12k_wmi_dma_ring_caps(struct ath12k_base *ab, 3884 u16 len, const void *ptr, void *data) 3885 { 3886 struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data; 3887 struct ath12k_wmi_dma_ring_caps_params *dma_caps; 3888 struct ath12k_dbring_cap *dir_buff_caps; 3889 int ret; 3890 u32 i; 3891 3892 dma_caps_parse->n_dma_ring_caps = 0; 3893 dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr; 3894 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 3895 ath12k_wmi_dma_ring_caps_parse, 3896 dma_caps_parse); 3897 if (ret) { 3898 ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret); 3899 return ret; 3900 } 3901 3902 if (!dma_caps_parse->n_dma_ring_caps) 3903 return 0; 3904 3905 if (ab->num_db_cap) { 3906 ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n"); 3907 return 0; 3908 } 3909 3910 ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); 3911 if (ret) 3912 return ret; 3913 3914 dir_buff_caps = ab->db_caps; 3915 for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { 3916 if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) { 3917 ath12k_warn(ab, "Invalid module id %d\n", 3918 le32_to_cpu(dma_caps[i].module_id)); 3919 ret = -EINVAL; 3920 goto free_dir_buff; 3921 } 3922 3923 dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id); 3924 dir_buff_caps[i].pdev_id = 3925 DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id)); 3926 dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem); 3927 dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz); 3928 dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align); 3929 } 3930 3931 return 0; 3932 3933 free_dir_buff: 3934 ath12k_wmi_free_dbring_caps(ab); 3935 return ret; 3936 } 3937 3938 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab, 3939 u16 tag, u16 len, 3940 const void *ptr, void *data) 3941 { 3942 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 3943 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 3944 int ret; 3945 3946 switch (tag) { 3947 case WMI_TAG_SERVICE_READY_EXT_EVENT: 3948 ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr, 3949 &svc_rdy_ext->arg); 3950 if (ret) { 3951 ath12k_warn(ab, "unable to extract ext params\n"); 3952 return ret; 3953 } 3954 break; 3955 3956 case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS: 3957 svc_rdy_ext->hw_caps = ptr; 3958 svc_rdy_ext->arg.num_hw_modes = 3959 le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes); 3960 break; 3961 3962 case WMI_TAG_SOC_HAL_REG_CAPABILITIES: 3963 ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr, 3964 svc_rdy_ext); 3965 if (ret) 3966 return ret; 3967 break; 3968 3969 case WMI_TAG_ARRAY_STRUCT: 3970 if (!svc_rdy_ext->hw_mode_done) { 3971 ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext); 3972 if (ret) 3973 return ret; 3974 3975 svc_rdy_ext->hw_mode_done = true; 3976 } else if (!svc_rdy_ext->mac_phy_done) { 3977 svc_rdy_ext->n_mac_phy_caps = 0; 3978 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 3979 ath12k_wmi_mac_phy_caps_parse, 3980 svc_rdy_ext); 3981 if (ret) { 3982 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 3983 return ret; 3984 } 3985 3986 svc_rdy_ext->mac_phy_done = true; 3987 } else if (!svc_rdy_ext->ext_hal_reg_done) { 3988 ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); 3989 if (ret) 3990 return ret; 3991 3992 svc_rdy_ext->ext_hal_reg_done = true; 3993 } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { 3994 svc_rdy_ext->mac_phy_chainmask_combo_done = true; 3995 } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { 3996 
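			/* Chainmask combo and capability arrays are not
			 * consumed by the driver yet; the done flags below
			 * only keep this handler in step with the order of
			 * WMI_TAG_ARRAY_STRUCT TLVs sent by firmware.
			 */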
svc_rdy_ext->mac_phy_chainmask_cap_done = true; 3997 } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { 3998 svc_rdy_ext->oem_dma_ring_cap_done = true; 3999 } else if (!svc_rdy_ext->dma_ring_cap_done) { 4000 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 4001 &svc_rdy_ext->dma_caps_parse); 4002 if (ret) 4003 return ret; 4004 4005 svc_rdy_ext->dma_ring_cap_done = true; 4006 } 4007 break; 4008 4009 default: 4010 break; 4011 } 4012 return 0; 4013 } 4014 4015 static int ath12k_service_ready_ext_event(struct ath12k_base *ab, 4016 struct sk_buff *skb) 4017 { 4018 struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { }; 4019 int ret; 4020 4021 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 4022 ath12k_wmi_svc_rdy_ext_parse, 4023 &svc_rdy_ext); 4024 if (ret) { 4025 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 4026 goto err; 4027 } 4028 4029 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) 4030 complete(&ab->wmi_ab.service_ready); 4031 4032 kfree(svc_rdy_ext.mac_phy_caps); 4033 return 0; 4034 4035 err: 4036 ath12k_wmi_free_dbring_caps(ab); 4037 return ret; 4038 } 4039 4040 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, 4041 u16 tag, u16 len, 4042 const void *ptr, void *data) 4043 { 4044 struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; 4045 int ret; 4046 4047 switch (tag) { 4048 case WMI_TAG_ARRAY_STRUCT: 4049 if (!parse->dma_ring_cap_done) { 4050 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 4051 &parse->dma_caps_parse); 4052 if (ret) 4053 return ret; 4054 4055 parse->dma_ring_cap_done = true; 4056 } 4057 break; 4058 default: 4059 break; 4060 } 4061 4062 return 0; 4063 } 4064 4065 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab, 4066 struct sk_buff *skb) 4067 { 4068 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { }; 4069 int ret; 4070 4071 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 4072 ath12k_wmi_svc_rdy_ext2_parse, 4073 &svc_rdy_ext2); 4074 if (ret) { 4075 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); 4076 goto err; 4077 } 4078 4079 complete(&ab->wmi_ab.service_ready); 4080 4081 return 0; 4082 4083 err: 4084 ath12k_wmi_free_dbring_caps(ab); 4085 return ret; 4086 } 4087 4088 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, 4089 struct wmi_vdev_start_resp_event *vdev_rsp) 4090 { 4091 const void **tb; 4092 const struct wmi_vdev_start_resp_event *ev; 4093 int ret; 4094 4095 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4096 if (IS_ERR(tb)) { 4097 ret = PTR_ERR(tb); 4098 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4099 return ret; 4100 } 4101 4102 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; 4103 if (!ev) { 4104 ath12k_warn(ab, "failed to fetch vdev start resp ev"); 4105 kfree(tb); 4106 return -EPROTO; 4107 } 4108 4109 *vdev_rsp = *ev; 4110 4111 kfree(tb); 4112 return 0; 4113 } 4114 4115 static struct ath12k_reg_rule 4116 *create_ext_reg_rules_from_wmi(u32 num_reg_rules, 4117 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) 4118 { 4119 struct ath12k_reg_rule *reg_rule_ptr; 4120 u32 count; 4121 4122 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), 4123 GFP_ATOMIC); 4124 4125 if (!reg_rule_ptr) 4126 return NULL; 4127 4128 for (count = 0; count < num_reg_rules; count++) { 4129 reg_rule_ptr[count].start_freq = 4130 le32_get_bits(wmi_reg_rule[count].freq_info, 4131 REG_RULE_START_FREQ); 4132 reg_rule_ptr[count].end_freq = 4133 le32_get_bits(wmi_reg_rule[count].freq_info, 4134 REG_RULE_END_FREQ); 4135 reg_rule_ptr[count].max_bw = 4136 
le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 4137 REG_RULE_MAX_BW); 4138 reg_rule_ptr[count].reg_power = 4139 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 4140 REG_RULE_REG_PWR); 4141 reg_rule_ptr[count].ant_gain = 4142 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 4143 REG_RULE_ANT_GAIN); 4144 reg_rule_ptr[count].flags = 4145 le32_get_bits(wmi_reg_rule[count].flag_info, 4146 REG_RULE_FLAGS); 4147 reg_rule_ptr[count].psd_flag = 4148 le32_get_bits(wmi_reg_rule[count].psd_power_info, 4149 REG_RULE_PSD_INFO); 4150 reg_rule_ptr[count].psd_eirp = 4151 le32_get_bits(wmi_reg_rule[count].psd_power_info, 4152 REG_RULE_PSD_EIRP); 4153 } 4154 4155 return reg_rule_ptr; 4156 } 4157 4158 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, 4159 struct sk_buff *skb, 4160 struct ath12k_reg_info *reg_info) 4161 { 4162 const void **tb; 4163 const struct wmi_reg_chan_list_cc_ext_event *ev; 4164 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; 4165 u32 num_2g_reg_rules, num_5g_reg_rules; 4166 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 4167 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; 4168 u32 total_reg_rules = 0; 4169 int ret, i, j; 4170 4171 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 4172 4173 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4174 if (IS_ERR(tb)) { 4175 ret = PTR_ERR(tb); 4176 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4177 return ret; 4178 } 4179 4180 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; 4181 if (!ev) { 4182 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); 4183 kfree(tb); 4184 return -EPROTO; 4185 } 4186 4187 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); 4188 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); 4189 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = 4190 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); 4191 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = 4192 le32_to_cpu(ev->num_6g_reg_rules_ap_sp); 4193 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = 4194 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); 4195 4196 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 4197 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 4198 le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]); 4199 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 4200 le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]); 4201 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 4202 le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]); 4203 } 4204 4205 num_2g_reg_rules = reg_info->num_2g_reg_rules; 4206 total_reg_rules += num_2g_reg_rules; 4207 num_5g_reg_rules = reg_info->num_5g_reg_rules; 4208 total_reg_rules += num_5g_reg_rules; 4209 4210 if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) { 4211 ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n", 4212 num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES); 4213 kfree(tb); 4214 return -EINVAL; 4215 } 4216 4217 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 4218 num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i]; 4219 4220 if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) { 4221 ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n", 4222 i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES); 4223 kfree(tb); 4224 return -EINVAL; 4225 } 4226 4227 total_reg_rules += num_6g_reg_rules_ap[i]; 4228 } 4229 4230 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 
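		/* For each client type, mirror the rule counts for the three
		 * AP power modes (LPI/SP/VLP) into locals and bound-check
		 * them against MAX_6G_REG_RULES before they feed the rule
		 * allocations below.
		 */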
4231 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 4232 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 4233 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 4234 4235 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 4236 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 4237 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 4238 4239 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 4240 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 4241 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 4242 4243 if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES || 4244 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES || 4245 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) { 4246 ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n", 4247 i); 4248 kfree(tb); 4249 return -EINVAL; 4250 } 4251 } 4252 4253 if (!total_reg_rules) { 4254 ath12k_warn(ab, "No reg rules available\n"); 4255 kfree(tb); 4256 return -EINVAL; 4257 } 4258 4259 memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); 4260 4261 /* FIXME: Currently FW includes 6G reg rule also in 5G rule 4262 * list for country US. 4263 * Having same 6G reg rule in 5G and 6G rules list causes 4264 * intersect check to be true, and same rules will be shown 4265 * multiple times in iw cmd. So added hack below to avoid 4266 * parsing 6G rule from 5G reg rule list, and this can be 4267 * removed later, after FW updates to remove 6G reg rule 4268 * from 5G rules list. 4269 */ 4270 if (memcmp(reg_info->alpha2, "US", 2) == 0) { 4271 reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES; 4272 num_5g_reg_rules = reg_info->num_5g_reg_rules; 4273 } 4274 4275 reg_info->dfs_region = le32_to_cpu(ev->dfs_region); 4276 reg_info->phybitmap = le32_to_cpu(ev->phybitmap); 4277 reg_info->num_phy = le32_to_cpu(ev->num_phy); 4278 reg_info->phy_id = le32_to_cpu(ev->phy_id); 4279 reg_info->ctry_code = le32_to_cpu(ev->country_id); 4280 reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code); 4281 4282 switch (le32_to_cpu(ev->status_code)) { 4283 case WMI_REG_SET_CC_STATUS_PASS: 4284 reg_info->status_code = REG_SET_CC_STATUS_PASS; 4285 break; 4286 case WMI_REG_CURRENT_ALPHA2_NOT_FOUND: 4287 reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; 4288 break; 4289 case WMI_REG_INIT_ALPHA2_NOT_FOUND: 4290 reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; 4291 break; 4292 case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED: 4293 reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; 4294 break; 4295 case WMI_REG_SET_CC_STATUS_NO_MEMORY: 4296 reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; 4297 break; 4298 case WMI_REG_SET_CC_STATUS_FAIL: 4299 reg_info->status_code = REG_SET_CC_STATUS_FAIL; 4300 break; 4301 } 4302 4303 reg_info->is_ext_reg_event = true; 4304 4305 reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g); 4306 reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g); 4307 reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g); 4308 reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g); 4309 reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi); 4310 reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi); 4311 reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp); 4312 reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp); 4313 reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp); 4314 reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp); 4315 4316 for (i = 0; i < 
WMI_REG_MAX_CLIENT_TYPE; i++) { 4317 reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] = 4318 le32_to_cpu(ev->min_bw_6g_client_lpi[i]); 4319 reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] = 4320 le32_to_cpu(ev->max_bw_6g_client_lpi[i]); 4321 reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 4322 le32_to_cpu(ev->min_bw_6g_client_sp[i]); 4323 reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 4324 le32_to_cpu(ev->max_bw_6g_client_sp[i]); 4325 reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] = 4326 le32_to_cpu(ev->min_bw_6g_client_vlp[i]); 4327 reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] = 4328 le32_to_cpu(ev->max_bw_6g_client_vlp[i]); 4329 } 4330 4331 ath12k_dbg(ab, ATH12K_DBG_WMI, 4332 "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", 4333 __func__, reg_info->alpha2, reg_info->dfs_region, 4334 reg_info->min_bw_2g, reg_info->max_bw_2g, 4335 reg_info->min_bw_5g, reg_info->max_bw_5g); 4336 4337 ath12k_dbg(ab, ATH12K_DBG_WMI, 4338 "num_2g_reg_rules %d num_5g_reg_rules %d", 4339 num_2g_reg_rules, num_5g_reg_rules); 4340 4341 ath12k_dbg(ab, ATH12K_DBG_WMI, 4342 "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d", 4343 num_6g_reg_rules_ap[WMI_REG_INDOOR_AP], 4344 num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP], 4345 num_6g_reg_rules_ap[WMI_REG_VLP_AP]); 4346 4347 ath12k_dbg(ab, ATH12K_DBG_WMI, 4348 "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 4349 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT], 4350 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT], 4351 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]); 4352 4353 ath12k_dbg(ab, ATH12K_DBG_WMI, 4354 "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 4355 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT], 4356 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT], 4357 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]); 4358 4359 ext_wmi_reg_rule = 4360 (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev 4361 + sizeof(*ev) 4362 + sizeof(struct wmi_tlv)); 4363 4364 if (num_2g_reg_rules) { 4365 reg_info->reg_rules_2g_ptr = 4366 create_ext_reg_rules_from_wmi(num_2g_reg_rules, 4367 ext_wmi_reg_rule); 4368 4369 if (!reg_info->reg_rules_2g_ptr) { 4370 kfree(tb); 4371 ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n"); 4372 return -ENOMEM; 4373 } 4374 } 4375 4376 if (num_5g_reg_rules) { 4377 ext_wmi_reg_rule += num_2g_reg_rules; 4378 reg_info->reg_rules_5g_ptr = 4379 create_ext_reg_rules_from_wmi(num_5g_reg_rules, 4380 ext_wmi_reg_rule); 4381 4382 if (!reg_info->reg_rules_5g_ptr) { 4383 kfree(tb); 4384 ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n"); 4385 return -ENOMEM; 4386 } 4387 } 4388 4389 ext_wmi_reg_rule += num_5g_reg_rules; 4390 4391 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 4392 reg_info->reg_rules_6g_ap_ptr[i] = 4393 create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i], 4394 ext_wmi_reg_rule); 4395 4396 if (!reg_info->reg_rules_6g_ap_ptr[i]) { 4397 kfree(tb); 4398 ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n"); 4399 return -ENOMEM; 4400 } 4401 4402 ext_wmi_reg_rule += num_6g_reg_rules_ap[i]; 4403 } 4404 4405 for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { 4406 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 4407 reg_info->reg_rules_6g_client_ptr[j][i] = 4408 create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i], 4409 ext_wmi_reg_rule); 4410 4411 if 
(!reg_info->reg_rules_6g_client_ptr[j][i]) { 4412 kfree(tb); 4413 ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n"); 4414 return -ENOMEM; 4415 } 4416 4417 ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i]; 4418 } 4419 } 4420 4421 reg_info->client_type = le32_to_cpu(ev->client_type); 4422 reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; 4423 reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; 4424 reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] = 4425 le32_to_cpu(ev->domain_code_6g_ap_lpi); 4426 reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] = 4427 le32_to_cpu(ev->domain_code_6g_ap_sp); 4428 reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] = 4429 le32_to_cpu(ev->domain_code_6g_ap_vlp); 4430 4431 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 4432 reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] = 4433 le32_to_cpu(ev->domain_code_6g_client_lpi[i]); 4434 reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] = 4435 le32_to_cpu(ev->domain_code_6g_client_sp[i]); 4436 reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] = 4437 le32_to_cpu(ev->domain_code_6g_client_vlp[i]); 4438 } 4439 4440 reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id); 4441 4442 ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d", 4443 reg_info->client_type, reg_info->domain_code_6g_super_id); 4444 4445 ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n"); 4446 4447 kfree(tb); 4448 return 0; 4449 } 4450 4451 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb, 4452 struct wmi_peer_delete_resp_event *peer_del_resp) 4453 { 4454 const void **tb; 4455 const struct wmi_peer_delete_resp_event *ev; 4456 int ret; 4457 4458 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4459 if (IS_ERR(tb)) { 4460 ret = PTR_ERR(tb); 4461 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4462 return ret; 4463 } 4464 4465 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; 4466 if (!ev) { 4467 ath12k_warn(ab, "failed to fetch peer delete resp ev"); 4468 kfree(tb); 4469 return -EPROTO; 4470 } 4471 4472 memset(peer_del_resp, 0, sizeof(*peer_del_resp)); 4473 4474 peer_del_resp->vdev_id = ev->vdev_id; 4475 ether_addr_copy(peer_del_resp->peer_macaddr.addr, 4476 ev->peer_macaddr.addr); 4477 4478 kfree(tb); 4479 return 0; 4480 } 4481 4482 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, 4483 struct sk_buff *skb, 4484 u32 *vdev_id) 4485 { 4486 const void **tb; 4487 const struct wmi_vdev_delete_resp_event *ev; 4488 int ret; 4489 4490 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4491 if (IS_ERR(tb)) { 4492 ret = PTR_ERR(tb); 4493 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4494 return ret; 4495 } 4496 4497 ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; 4498 if (!ev) { 4499 ath12k_warn(ab, "failed to fetch vdev delete resp ev"); 4500 kfree(tb); 4501 return -EPROTO; 4502 } 4503 4504 *vdev_id = le32_to_cpu(ev->vdev_id); 4505 4506 kfree(tb); 4507 return 0; 4508 } 4509 4510 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf, 4511 u32 len, u32 *vdev_id, 4512 u32 *tx_status) 4513 { 4514 const void **tb; 4515 const struct wmi_bcn_tx_status_event *ev; 4516 int ret; 4517 4518 tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 4519 if (IS_ERR(tb)) { 4520 ret = PTR_ERR(tb); 4521 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4522 return ret; 4523 } 4524 4525 ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; 4526 if (!ev) { 4527 ath12k_warn(ab, "failed to 
fetch bcn tx status ev"); 4528 kfree(tb); 4529 return -EPROTO; 4530 } 4531 4532 *vdev_id = le32_to_cpu(ev->vdev_id); 4533 *tx_status = le32_to_cpu(ev->tx_status); 4534 4535 kfree(tb); 4536 return 0; 4537 } 4538 4539 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb, 4540 u32 *vdev_id) 4541 { 4542 const void **tb; 4543 const struct wmi_vdev_stopped_event *ev; 4544 int ret; 4545 4546 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4547 if (IS_ERR(tb)) { 4548 ret = PTR_ERR(tb); 4549 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4550 return ret; 4551 } 4552 4553 ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; 4554 if (!ev) { 4555 ath12k_warn(ab, "failed to fetch vdev stop ev"); 4556 kfree(tb); 4557 return -EPROTO; 4558 } 4559 4560 *vdev_id = le32_to_cpu(ev->vdev_id); 4561 4562 kfree(tb); 4563 return 0; 4564 } 4565 4566 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab, 4567 u16 tag, u16 len, 4568 const void *ptr, void *data) 4569 { 4570 struct wmi_tlv_mgmt_rx_parse *parse = data; 4571 4572 switch (tag) { 4573 case WMI_TAG_MGMT_RX_HDR: 4574 parse->fixed = ptr; 4575 break; 4576 case WMI_TAG_ARRAY_BYTE: 4577 if (!parse->frame_buf_done) { 4578 parse->frame_buf = ptr; 4579 parse->frame_buf_done = true; 4580 } 4581 break; 4582 } 4583 return 0; 4584 } 4585 4586 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab, 4587 struct sk_buff *skb, 4588 struct ath12k_wmi_mgmt_rx_arg *hdr) 4589 { 4590 struct wmi_tlv_mgmt_rx_parse parse = { }; 4591 const struct ath12k_wmi_mgmt_rx_params *ev; 4592 const u8 *frame; 4593 int i, ret; 4594 4595 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 4596 ath12k_wmi_tlv_mgmt_rx_parse, 4597 &parse); 4598 if (ret) { 4599 ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret); 4600 return ret; 4601 } 4602 4603 ev = parse.fixed; 4604 frame = parse.frame_buf; 4605 4606 if (!ev || !frame) { 4607 ath12k_warn(ab, "failed to fetch mgmt rx hdr"); 4608 return -EPROTO; 4609 } 4610 4611 hdr->pdev_id = le32_to_cpu(ev->pdev_id); 4612 hdr->chan_freq = le32_to_cpu(ev->chan_freq); 4613 hdr->channel = le32_to_cpu(ev->channel); 4614 hdr->snr = le32_to_cpu(ev->snr); 4615 hdr->rate = le32_to_cpu(ev->rate); 4616 hdr->phy_mode = le32_to_cpu(ev->phy_mode); 4617 hdr->buf_len = le32_to_cpu(ev->buf_len); 4618 hdr->status = le32_to_cpu(ev->status); 4619 hdr->flags = le32_to_cpu(ev->flags); 4620 hdr->rssi = a_sle32_to_cpu(ev->rssi); 4621 hdr->tsf_delta = le32_to_cpu(ev->tsf_delta); 4622 4623 for (i = 0; i < ATH_MAX_ANTENNA; i++) 4624 hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]); 4625 4626 if (skb->len < (frame - skb->data) + hdr->buf_len) { 4627 ath12k_warn(ab, "invalid length in mgmt rx hdr ev"); 4628 return -EPROTO; 4629 } 4630 4631 /* shift the sk_buff to point to `frame` */ 4632 skb_trim(skb, 0); 4633 skb_put(skb, frame - skb->data); 4634 skb_pull(skb, frame - skb->data); 4635 skb_put(skb, hdr->buf_len); 4636 4637 return 0; 4638 } 4639 4640 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, 4641 u32 status) 4642 { 4643 struct sk_buff *msdu; 4644 struct ieee80211_tx_info *info; 4645 struct ath12k_skb_cb *skb_cb; 4646 int num_mgmt; 4647 4648 spin_lock_bh(&ar->txmgmt_idr_lock); 4649 msdu = idr_find(&ar->txmgmt_idr, desc_id); 4650 4651 if (!msdu) { 4652 ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", 4653 desc_id); 4654 spin_unlock_bh(&ar->txmgmt_idr_lock); 4655 return -ENOENT; 4656 } 4657 4658 idr_remove(&ar->txmgmt_idr, desc_id); 4659 spin_unlock_bh(&ar->txmgmt_idr_lock); 
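	/* The descriptor has been removed from the IDR, so the skb is owned
	 * here: unmap its DMA buffer and report the TX status back to
	 * mac80211.
	 */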
4660 4661 skb_cb = ATH12K_SKB_CB(msdu); 4662 dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 4663 4664 info = IEEE80211_SKB_CB(msdu); 4665 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) 4666 info->flags |= IEEE80211_TX_STAT_ACK; 4667 4668 ieee80211_tx_status_irqsafe(ar->hw, msdu); 4669 4670 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 4671 4672 /* WARN when we received this event without doing any mgmt tx */ 4673 if (num_mgmt < 0) 4674 WARN_ON_ONCE(1); 4675 4676 if (!num_mgmt) 4677 wake_up(&ar->txmgmt_empty_waitq); 4678 4679 return 0; 4680 } 4681 4682 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, 4683 struct sk_buff *skb, 4684 struct wmi_mgmt_tx_compl_event *param) 4685 { 4686 const void **tb; 4687 const struct wmi_mgmt_tx_compl_event *ev; 4688 int ret; 4689 4690 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4691 if (IS_ERR(tb)) { 4692 ret = PTR_ERR(tb); 4693 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4694 return ret; 4695 } 4696 4697 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; 4698 if (!ev) { 4699 ath12k_warn(ab, "failed to fetch mgmt tx compl ev"); 4700 kfree(tb); 4701 return -EPROTO; 4702 } 4703 4704 param->pdev_id = ev->pdev_id; 4705 param->desc_id = ev->desc_id; 4706 param->status = ev->status; 4707 4708 kfree(tb); 4709 return 0; 4710 } 4711 4712 static void ath12k_wmi_event_scan_started(struct ath12k *ar) 4713 { 4714 lockdep_assert_held(&ar->data_lock); 4715 4716 switch (ar->scan.state) { 4717 case ATH12K_SCAN_IDLE: 4718 case ATH12K_SCAN_RUNNING: 4719 case ATH12K_SCAN_ABORTING: 4720 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", 4721 ath12k_scan_state_str(ar->scan.state), 4722 ar->scan.state); 4723 break; 4724 case ATH12K_SCAN_STARTING: 4725 ar->scan.state = ATH12K_SCAN_RUNNING; 4726 complete(&ar->scan.started); 4727 break; 4728 } 4729 } 4730 4731 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar) 4732 { 4733 lockdep_assert_held(&ar->data_lock); 4734 4735 switch (ar->scan.state) { 4736 case ATH12K_SCAN_IDLE: 4737 case ATH12K_SCAN_RUNNING: 4738 case ATH12K_SCAN_ABORTING: 4739 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", 4740 ath12k_scan_state_str(ar->scan.state), 4741 ar->scan.state); 4742 break; 4743 case ATH12K_SCAN_STARTING: 4744 complete(&ar->scan.started); 4745 __ath12k_mac_scan_finish(ar); 4746 break; 4747 } 4748 } 4749 4750 static void ath12k_wmi_event_scan_completed(struct ath12k *ar) 4751 { 4752 lockdep_assert_held(&ar->data_lock); 4753 4754 switch (ar->scan.state) { 4755 case ATH12K_SCAN_IDLE: 4756 case ATH12K_SCAN_STARTING: 4757 /* One suspected reason scan can be completed while starting is 4758 * if firmware fails to deliver all scan events to the host, 4759 * e.g. when transport pipe is full. This has been observed 4760 * with spectral scan phyerr events starving wmi transport 4761 * pipe. In such case the "scan completed" event should be (and 4762 * is) ignored by the host as it may be just firmware's scan 4763 * state machine recovering. 
4764 */ 4765 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", 4766 ath12k_scan_state_str(ar->scan.state), 4767 ar->scan.state); 4768 break; 4769 case ATH12K_SCAN_RUNNING: 4770 case ATH12K_SCAN_ABORTING: 4771 __ath12k_mac_scan_finish(ar); 4772 break; 4773 } 4774 } 4775 4776 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) 4777 { 4778 lockdep_assert_held(&ar->data_lock); 4779 4780 switch (ar->scan.state) { 4781 case ATH12K_SCAN_IDLE: 4782 case ATH12K_SCAN_STARTING: 4783 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", 4784 ath12k_scan_state_str(ar->scan.state), 4785 ar->scan.state); 4786 break; 4787 case ATH12K_SCAN_RUNNING: 4788 case ATH12K_SCAN_ABORTING: 4789 ar->scan_channel = NULL; 4790 break; 4791 } 4792 } 4793 4794 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) 4795 { 4796 lockdep_assert_held(&ar->data_lock); 4797 4798 switch (ar->scan.state) { 4799 case ATH12K_SCAN_IDLE: 4800 case ATH12K_SCAN_STARTING: 4801 ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", 4802 ath12k_scan_state_str(ar->scan.state), 4803 ar->scan.state); 4804 break; 4805 case ATH12K_SCAN_RUNNING: 4806 case ATH12K_SCAN_ABORTING: 4807 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); 4808 break; 4809 } 4810 } 4811 4812 static const char * 4813 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type, 4814 enum wmi_scan_completion_reason reason) 4815 { 4816 switch (type) { 4817 case WMI_SCAN_EVENT_STARTED: 4818 return "started"; 4819 case WMI_SCAN_EVENT_COMPLETED: 4820 switch (reason) { 4821 case WMI_SCAN_REASON_COMPLETED: 4822 return "completed"; 4823 case WMI_SCAN_REASON_CANCELLED: 4824 return "completed [cancelled]"; 4825 case WMI_SCAN_REASON_PREEMPTED: 4826 return "completed [preempted]"; 4827 case WMI_SCAN_REASON_TIMEDOUT: 4828 return "completed [timedout]"; 4829 case WMI_SCAN_REASON_INTERNAL_FAILURE: 4830 return "completed [internal err]"; 4831 case WMI_SCAN_REASON_MAX: 4832 break; 4833 } 4834 return "completed [unknown]"; 4835 case WMI_SCAN_EVENT_BSS_CHANNEL: 4836 return "bss channel"; 4837 case WMI_SCAN_EVENT_FOREIGN_CHAN: 4838 return "foreign channel"; 4839 case WMI_SCAN_EVENT_DEQUEUED: 4840 return "dequeued"; 4841 case WMI_SCAN_EVENT_PREEMPTED: 4842 return "preempted"; 4843 case WMI_SCAN_EVENT_START_FAILED: 4844 return "start failed"; 4845 case WMI_SCAN_EVENT_RESTARTED: 4846 return "restarted"; 4847 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 4848 return "foreign channel exit"; 4849 default: 4850 return "unknown"; 4851 } 4852 } 4853 4854 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, 4855 struct wmi_scan_event *scan_evt_param) 4856 { 4857 const void **tb; 4858 const struct wmi_scan_event *ev; 4859 int ret; 4860 4861 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4862 if (IS_ERR(tb)) { 4863 ret = PTR_ERR(tb); 4864 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4865 return ret; 4866 } 4867 4868 ev = tb[WMI_TAG_SCAN_EVENT]; 4869 if (!ev) { 4870 ath12k_warn(ab, "failed to fetch scan ev"); 4871 kfree(tb); 4872 return -EPROTO; 4873 } 4874 4875 scan_evt_param->event_type = ev->event_type; 4876 scan_evt_param->reason = ev->reason; 4877 scan_evt_param->channel_freq = ev->channel_freq; 4878 scan_evt_param->scan_req_id = ev->scan_req_id; 4879 scan_evt_param->scan_id = ev->scan_id; 4880 scan_evt_param->vdev_id = ev->vdev_id; 4881 scan_evt_param->tsf_timestamp = ev->tsf_timestamp; 4882 4883 
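	/* Note: the fields above are copied in firmware (little-endian) byte
	 * order; callers convert with le32_to_cpu() where needed.
	 */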
kfree(tb); 4884 return 0; 4885 } 4886 4887 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb, 4888 struct wmi_peer_sta_kickout_arg *arg) 4889 { 4890 const void **tb; 4891 const struct wmi_peer_sta_kickout_event *ev; 4892 int ret; 4893 4894 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4895 if (IS_ERR(tb)) { 4896 ret = PTR_ERR(tb); 4897 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4898 return ret; 4899 } 4900 4901 ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; 4902 if (!ev) { 4903 ath12k_warn(ab, "failed to fetch peer sta kickout ev"); 4904 kfree(tb); 4905 return -EPROTO; 4906 } 4907 4908 arg->mac_addr = ev->peer_macaddr.addr; 4909 4910 kfree(tb); 4911 return 0; 4912 } 4913 4914 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, 4915 struct wmi_roam_event *roam_ev) 4916 { 4917 const void **tb; 4918 const struct wmi_roam_event *ev; 4919 int ret; 4920 4921 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4922 if (IS_ERR(tb)) { 4923 ret = PTR_ERR(tb); 4924 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4925 return ret; 4926 } 4927 4928 ev = tb[WMI_TAG_ROAM_EVENT]; 4929 if (!ev) { 4930 ath12k_warn(ab, "failed to fetch roam ev"); 4931 kfree(tb); 4932 return -EPROTO; 4933 } 4934 4935 roam_ev->vdev_id = ev->vdev_id; 4936 roam_ev->reason = ev->reason; 4937 roam_ev->rssi = ev->rssi; 4938 4939 kfree(tb); 4940 return 0; 4941 } 4942 4943 static int freq_to_idx(struct ath12k *ar, int freq) 4944 { 4945 struct ieee80211_supported_band *sband; 4946 int band, ch, idx = 0; 4947 4948 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 4949 if (!ar->mac.sbands[band].channels) 4950 continue; 4951 4952 sband = ar->hw->wiphy->bands[band]; 4953 if (!sband) 4954 continue; 4955 4956 for (ch = 0; ch < sband->n_channels; ch++, idx++) 4957 if (sband->channels[ch].center_freq == freq) 4958 goto exit; 4959 } 4960 4961 exit: 4962 return idx; 4963 } 4964 4965 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf, 4966 u32 len, struct wmi_chan_info_event *ch_info_ev) 4967 { 4968 const void **tb; 4969 const struct wmi_chan_info_event *ev; 4970 int ret; 4971 4972 tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 4973 if (IS_ERR(tb)) { 4974 ret = PTR_ERR(tb); 4975 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 4976 return ret; 4977 } 4978 4979 ev = tb[WMI_TAG_CHAN_INFO_EVENT]; 4980 if (!ev) { 4981 ath12k_warn(ab, "failed to fetch chan info ev"); 4982 kfree(tb); 4983 return -EPROTO; 4984 } 4985 4986 ch_info_ev->err_code = ev->err_code; 4987 ch_info_ev->freq = ev->freq; 4988 ch_info_ev->cmd_flags = ev->cmd_flags; 4989 ch_info_ev->noise_floor = ev->noise_floor; 4990 ch_info_ev->rx_clear_count = ev->rx_clear_count; 4991 ch_info_ev->cycle_count = ev->cycle_count; 4992 ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; 4993 ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; 4994 ch_info_ev->rx_frame_count = ev->rx_frame_count; 4995 ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; 4996 ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; 4997 ch_info_ev->vdev_id = ev->vdev_id; 4998 4999 kfree(tb); 5000 return 0; 5001 } 5002 5003 static int 5004 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 5005 struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) 5006 { 5007 const void **tb; 5008 const struct wmi_pdev_bss_chan_info_event *ev; 5009 int ret; 5010 5011 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5012 if (IS_ERR(tb)) { 5013 ret = 
PTR_ERR(tb); 5014 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5015 return ret; 5016 } 5017 5018 ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; 5019 if (!ev) { 5020 ath12k_warn(ab, "failed to fetch pdev bss chan info ev"); 5021 kfree(tb); 5022 return -EPROTO; 5023 } 5024 5025 bss_ch_info_ev->pdev_id = ev->pdev_id; 5026 bss_ch_info_ev->freq = ev->freq; 5027 bss_ch_info_ev->noise_floor = ev->noise_floor; 5028 bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; 5029 bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; 5030 bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; 5031 bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; 5032 bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; 5033 bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; 5034 bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; 5035 bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; 5036 bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; 5037 bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; 5038 5039 kfree(tb); 5040 return 0; 5041 } 5042 5043 static int 5044 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb, 5045 struct wmi_vdev_install_key_complete_arg *arg) 5046 { 5047 const void **tb; 5048 const struct wmi_vdev_install_key_compl_event *ev; 5049 int ret; 5050 5051 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5052 if (IS_ERR(tb)) { 5053 ret = PTR_ERR(tb); 5054 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5055 return ret; 5056 } 5057 5058 ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; 5059 if (!ev) { 5060 ath12k_warn(ab, "failed to fetch vdev install key compl ev"); 5061 kfree(tb); 5062 return -EPROTO; 5063 } 5064 5065 arg->vdev_id = le32_to_cpu(ev->vdev_id); 5066 arg->macaddr = ev->peer_macaddr.addr; 5067 arg->key_idx = le32_to_cpu(ev->key_idx); 5068 arg->key_flags = le32_to_cpu(ev->key_flags); 5069 arg->status = le32_to_cpu(ev->status); 5070 5071 kfree(tb); 5072 return 0; 5073 } 5074 5075 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb, 5076 struct wmi_peer_assoc_conf_arg *peer_assoc_conf) 5077 { 5078 const void **tb; 5079 const struct wmi_peer_assoc_conf_event *ev; 5080 int ret; 5081 5082 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5083 if (IS_ERR(tb)) { 5084 ret = PTR_ERR(tb); 5085 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5086 return ret; 5087 } 5088 5089 ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT]; 5090 if (!ev) { 5091 ath12k_warn(ab, "failed to fetch peer assoc conf ev"); 5092 kfree(tb); 5093 return -EPROTO; 5094 } 5095 5096 peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id); 5097 peer_assoc_conf->macaddr = ev->peer_macaddr.addr; 5098 5099 kfree(tb); 5100 return 0; 5101 } 5102 5103 static int 5104 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf, 5105 u32 len, const struct wmi_pdev_temperature_event *ev) 5106 { 5107 const void **tb; 5108 int ret; 5109 5110 tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 5111 if (IS_ERR(tb)) { 5112 ret = PTR_ERR(tb); 5113 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5114 return ret; 5115 } 5116 5117 ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; 5118 if (!ev) { 5119 ath12k_warn(ab, "failed to fetch pdev temp ev"); 5120 kfree(tb); 5121 return -EPROTO; 5122 } 5123 5124 kfree(tb); 5125 return 0; 5126 } 5127 5128 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab) 5129 { 5130 /* try to send pending beacons 
first; they take priority. */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static bool ath12k_reg_is_world_alpha(char *alpha)
{
	return alpha[0] == '0' && alpha[1] == '0';
}

static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_reg_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx, i, j;
	struct ath12k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);

	if (ret) {
		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested country,
		 * fw retains the current regd. We log the failure
		 * and return from here.
		 */
		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params->single_pdev_only &&
		    pdev_idx < ab->hw_params->num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
		    reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e. a default regd was already set during initialization
	 * and the regd coming from this event has valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath12k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath12k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * fw are currently considered to be received due to user
		 * requests.
		 * Free the previously built regd before assigning the newly
		 * generated regd to ar. kfree() itself handles a NULL
		 * pointer.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		ieee80211_queue_work(ar->hw, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar are not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * kfree() itself handles a NULL pointer.
		 */
		kfree(ab->default_regd[pdev_idx]);
		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fall back to the older regd (by sending the previous country
	 * setting again) if fw has succeeded and we failed to process here.
	 * The regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);
mem_free:
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6g_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
		}
		kfree(reg_info);
	}
	return ret;
}
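/* WMI_READY parsing: the fixed params supply the SoC MAC address and init
 * status, and an optional WMI_TAG_ARRAY_FIXED_STRUCT provides extra
 * per-pdev MAC addresses for multi-radio chips.
 */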
static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data)
{
	struct ath12k_wmi_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct ath12k_wmi_mac_addr_params *addr_list;
	struct ath12k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       min_t(u16, sizeof(fixed_param), len));
		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
		rdy_parse->num_extra_mac_addr =
			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);

		ether_addr_copy(ab->mac_addr,
				fixed_param.ready_event_min.mac_addr.addr);
		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_rdy_parse rdy_parse = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_rdy_parse, &rdy_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	complete(&ab->wmi_ab.unified_ready);
	return 0;
}

static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_event peer_del_resp;
	struct ath12k *ar;

	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
		ath12k_warn(ab, "failed to extract peer delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
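	/* The vdev may already be gone by the time firmware responds; drop
	 * the event if it no longer maps to an active ar.
	 */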
if (!ar) { 5341 ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d", 5342 peer_del_resp.vdev_id); 5343 rcu_read_unlock(); 5344 return; 5345 } 5346 5347 complete(&ar->peer_delete_done); 5348 rcu_read_unlock(); 5349 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", 5350 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); 5351 } 5352 5353 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab, 5354 struct sk_buff *skb) 5355 { 5356 struct ath12k *ar; 5357 u32 vdev_id = 0; 5358 5359 if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { 5360 ath12k_warn(ab, "failed to extract vdev delete resp"); 5361 return; 5362 } 5363 5364 rcu_read_lock(); 5365 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 5366 if (!ar) { 5367 ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d", 5368 vdev_id); 5369 rcu_read_unlock(); 5370 return; 5371 } 5372 5373 complete(&ar->vdev_delete_done); 5374 5375 rcu_read_unlock(); 5376 5377 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n", 5378 vdev_id); 5379 } 5380 5381 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status) 5382 { 5383 switch (vdev_resp_status) { 5384 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: 5385 return "invalid vdev id"; 5386 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: 5387 return "not supported"; 5388 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: 5389 return "dfs violation"; 5390 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: 5391 return "invalid regdomain"; 5392 default: 5393 return "unknown"; 5394 } 5395 } 5396 5397 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 5398 { 5399 struct wmi_vdev_start_resp_event vdev_start_resp; 5400 struct ath12k *ar; 5401 u32 status; 5402 5403 if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { 5404 ath12k_warn(ab, "failed to extract vdev start resp"); 5405 return; 5406 } 5407 5408 rcu_read_lock(); 5409 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id)); 5410 if (!ar) { 5411 ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d", 5412 vdev_start_resp.vdev_id); 5413 rcu_read_unlock(); 5414 return; 5415 } 5416 5417 ar->last_wmi_vdev_start_status = 0; 5418 5419 status = le32_to_cpu(vdev_start_resp.status); 5420 5421 if (WARN_ON_ONCE(status)) { 5422 ath12k_warn(ab, "vdev start resp error status %d (%s)\n", 5423 status, ath12k_wmi_vdev_resp_print(status)); 5424 ar->last_wmi_vdev_start_status = status; 5425 } 5426 5427 complete(&ar->vdev_setup_done); 5428 5429 rcu_read_unlock(); 5430 5431 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d", 5432 vdev_start_resp.vdev_id); 5433 } 5434 5435 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) 5436 { 5437 u32 vdev_id, tx_status; 5438 5439 if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, 5440 &vdev_id, &tx_status) != 0) { 5441 ath12k_warn(ab, "failed to extract bcn tx status"); 5442 return; 5443 } 5444 } 5445 5446 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) 5447 { 5448 struct ath12k *ar; 5449 u32 vdev_id = 0; 5450 5451 if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { 5452 ath12k_warn(ab, "failed to extract vdev stopped event"); 5453 return; 5454 } 5455 5456 rcu_read_lock(); 5457 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 5458 if (!ar) { 5459 ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d", 5460 vdev_id); 5461 rcu_read_unlock(); 5462 return; 5463 } 5464 5465 complete(&ar->vdev_setup_done); 
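	/* vdev_setup_done is shared by the start-response and stopped
	 * paths; the mac layer waits on it to synchronize vdev state
	 * transitions.
	 */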
5466 5467 rcu_read_unlock(); 5468 5469 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); 5470 } 5471 5472 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) 5473 { 5474 struct ath12k_wmi_mgmt_rx_arg rx_ev = {0}; 5475 struct ath12k *ar; 5476 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5477 struct ieee80211_hdr *hdr; 5478 u16 fc; 5479 struct ieee80211_supported_band *sband; 5480 5481 if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { 5482 ath12k_warn(ab, "failed to extract mgmt rx event"); 5483 dev_kfree_skb(skb); 5484 return; 5485 } 5486 5487 memset(status, 0, sizeof(*status)); 5488 5489 ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n", 5490 rx_ev.status); 5491 5492 rcu_read_lock(); 5493 ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); 5494 5495 if (!ar) { 5496 ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", 5497 rx_ev.pdev_id); 5498 dev_kfree_skb(skb); 5499 goto exit; 5500 } 5501 5502 if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) || 5503 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 5504 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | 5505 WMI_RX_STATUS_ERR_CRC))) { 5506 dev_kfree_skb(skb); 5507 goto exit; 5508 } 5509 5510 if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) 5511 status->flag |= RX_FLAG_MMIC_ERROR; 5512 5513 if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) { 5514 status->band = NL80211_BAND_6GHZ; 5515 } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { 5516 status->band = NL80211_BAND_2GHZ; 5517 } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) { 5518 status->band = NL80211_BAND_5GHZ; 5519 } else { 5520 /* Shouldn't happen unless the list of channels advertised to 5521 * mac80211 has been changed. 5522 */ 5523 WARN_ON_ONCE(1); 5524 dev_kfree_skb(skb); 5525 goto exit; 5526 } 5527 5528 if (rx_ev.phy_mode == MODE_11B && 5529 (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) 5530 ath12k_dbg(ab, ATH12K_DBG_WMI, 5531 "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); 5532 5533 sband = &ar->mac.sbands[status->band]; 5534 5535 status->freq = ieee80211_channel_to_frequency(rx_ev.channel, 5536 status->band); 5537 status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR; 5538 status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); 5539 5540 hdr = (struct ieee80211_hdr *)skb->data; 5541 fc = le16_to_cpu(hdr->frame_control); 5542 5543 /* Firmware is guaranteed to report all essential management frames via 5544 * WMI while it can deliver some extra via HTT. Since there can be 5545 * duplicates, split the reporting w.r.t. monitor/sniffing. 5546 */ 5547 status->flag |= RX_FLAG_SKIP_MONITOR; 5548 5549 /* In case of PMF, FW delivers decrypted frames with Protected Bit set 5550 * including group privacy action frames.
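 * For frames that are not robust management frames, the code below marks the IV/MMIC as stripped and clears the Protected bit so mac80211 does not try to decrypt them again.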
5551 */ 5552 if (ieee80211_has_protected(hdr->frame_control)) { 5553 status->flag |= RX_FLAG_DECRYPTED; 5554 5555 if (!ieee80211_is_robust_mgmt_frame(skb)) { 5556 status->flag |= RX_FLAG_IV_STRIPPED | 5557 RX_FLAG_MMIC_STRIPPED; 5558 hdr->frame_control = __cpu_to_le16(fc & 5559 ~IEEE80211_FCTL_PROTECTED); 5560 } 5561 } 5562 5563 /* TODO: Beacon handling is pending implementation 5564 *if (ieee80211_is_beacon(hdr->frame_control)) 5565 * ath12k_mac_handle_beacon(ar, skb); 5566 */ 5567 5568 ath12k_dbg(ab, ATH12K_DBG_MGMT, 5569 "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", 5570 skb, skb->len, 5571 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 5572 5573 ath12k_dbg(ab, ATH12K_DBG_MGMT, 5574 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 5575 status->freq, status->band, status->signal, 5576 status->rate_idx); 5577 5578 ieee80211_rx_ni(ar->hw, skb); 5579 5580 exit: 5581 rcu_read_unlock(); 5582 } 5583 5584 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb) 5585 { 5586 struct wmi_mgmt_tx_compl_event tx_compl_param = {0}; 5587 struct ath12k *ar; 5588 5589 if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { 5590 ath12k_warn(ab, "failed to extract mgmt tx compl event"); 5591 return; 5592 } 5593 5594 rcu_read_lock(); 5595 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id)); 5596 if (!ar) { 5597 ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", 5598 tx_compl_param.pdev_id); 5599 goto exit; 5600 } 5601 5602 wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id), 5603 le32_to_cpu(tx_compl_param.status)); 5604 5605 ath12k_dbg(ab, ATH12K_DBG_MGMT, 5606 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", 5607 tx_compl_param.pdev_id, tx_compl_param.desc_id, 5608 tx_compl_param.status); 5609 5610 exit: 5611 rcu_read_unlock(); 5612 } 5613 5614 static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, 5615 u32 vdev_id) 5616 { 5617 int i; 5618 struct ath12k_pdev *pdev; 5619 struct ath12k *ar; 5620 5621 for (i = 0; i < ab->num_radios; i++) { 5622 pdev = rcu_dereference(ab->pdevs_active[i]); 5623 if (pdev && pdev->ar) { 5624 ar = pdev->ar; 5625 5626 spin_lock_bh(&ar->data_lock); 5627 if (ar->scan.state == ATH12K_SCAN_ABORTING && 5628 ar->scan.vdev_id == vdev_id) { 5629 spin_unlock_bh(&ar->data_lock); 5630 return ar; 5631 } 5632 spin_unlock_bh(&ar->data_lock); 5633 } 5634 } 5635 return NULL; 5636 } 5637 5638 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) 5639 { 5640 struct ath12k *ar; 5641 struct wmi_scan_event scan_ev = {0}; 5642 5643 if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) { 5644 ath12k_warn(ab, "failed to extract scan event"); 5645 return; 5646 } 5647 5648 rcu_read_lock(); 5649 5650 /* In case the scan was cancelled, e.g. during interface teardown, 5651 * the interface will not be found among the active interfaces. 5652 * In such scenarios, iterate over the active pdevs to find the 'ar' 5653 * whose scan state is ABORTING and whose aborting scan's vdev id 5654 * matches this event's info.
5655 */ 5656 if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && 5657 le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) 5658 ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id)); 5659 else 5660 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); 5661 5662 if (!ar) { 5663 ath12k_warn(ab, "Received scan event for unknown vdev"); 5664 rcu_read_unlock(); 5665 return; 5666 } 5667 5668 spin_lock_bh(&ar->data_lock); 5669 5670 ath12k_dbg(ab, ATH12K_DBG_WMI, 5671 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", 5672 ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type), 5673 le32_to_cpu(scan_ev.reason)), 5674 le32_to_cpu(scan_ev.event_type), 5675 le32_to_cpu(scan_ev.reason), 5676 le32_to_cpu(scan_ev.channel_freq), 5677 le32_to_cpu(scan_ev.scan_req_id), 5678 le32_to_cpu(scan_ev.scan_id), 5679 le32_to_cpu(scan_ev.vdev_id), 5680 ath12k_scan_state_str(ar->scan.state), ar->scan.state); 5681 5682 switch (le32_to_cpu(scan_ev.event_type)) { 5683 case WMI_SCAN_EVENT_STARTED: 5684 ath12k_wmi_event_scan_started(ar); 5685 break; 5686 case WMI_SCAN_EVENT_COMPLETED: 5687 ath12k_wmi_event_scan_completed(ar); 5688 break; 5689 case WMI_SCAN_EVENT_BSS_CHANNEL: 5690 ath12k_wmi_event_scan_bss_chan(ar); 5691 break; 5692 case WMI_SCAN_EVENT_FOREIGN_CHAN: 5693 ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq)); 5694 break; 5695 case WMI_SCAN_EVENT_START_FAILED: 5696 ath12k_warn(ab, "received scan start failure event\n"); 5697 ath12k_wmi_event_scan_start_failed(ar); 5698 break; 5699 case WMI_SCAN_EVENT_DEQUEUED: 5700 case WMI_SCAN_EVENT_PREEMPTED: 5701 case WMI_SCAN_EVENT_RESTARTED: 5702 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 5703 default: 5704 break; 5705 } 5706 5707 spin_unlock_bh(&ar->data_lock); 5708 5709 rcu_read_unlock(); 5710 } 5711 5712 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb) 5713 { 5714 struct wmi_peer_sta_kickout_arg arg = {}; 5715 struct ieee80211_sta *sta; 5716 struct ath12k_peer *peer; 5717 struct ath12k *ar; 5718 5719 if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { 5720 ath12k_warn(ab, "failed to extract peer sta kickout event"); 5721 return; 5722 } 5723 5724 rcu_read_lock(); 5725 5726 spin_lock_bh(&ab->base_lock); 5727 5728 peer = ath12k_peer_find_by_addr(ab, arg.mac_addr); 5729 5730 if (!peer) { 5731 ath12k_warn(ab, "peer not found %pM\n", 5732 arg.mac_addr); 5733 goto exit; 5734 } 5735 5736 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); 5737 if (!ar) { 5738 ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d", 5739 peer->vdev_id); 5740 goto exit; 5741 } 5742 5743 sta = ieee80211_find_sta_by_ifaddr(ar->hw, 5744 arg.mac_addr, NULL); 5745 if (!sta) { 5746 ath12k_warn(ab, "Spurious quick kickout for STA %pM\n", 5747 arg.mac_addr); 5748 goto exit; 5749 } 5750 5751 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM", 5752 arg.mac_addr); 5753 5754 ieee80211_report_low_ack(sta, 10); 5755 5756 exit: 5757 spin_unlock_bh(&ab->base_lock); 5758 rcu_read_unlock(); 5759 } 5760 5761 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) 5762 { 5763 struct wmi_roam_event roam_ev = {}; 5764 struct ath12k *ar; 5765 5766 if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { 5767 ath12k_warn(ab, "failed to extract roam event"); 5768 return; 5769 } 5770 5771 ath12k_dbg(ab, ATH12K_DBG_WMI, 5772 "wmi roam event vdev %u reason 0x%08x rssi %d\n", 5773 roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); 5774 5775 
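/* Firmware-initiated roaming is not supported yet: apart from the beacon miss TODO below, the roam reasons are only validated and logged. */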
rcu_read_lock(); 5776 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id)); 5777 if (!ar) { 5778 ath12k_warn(ab, "invalid vdev id in roam ev %d", 5779 roam_ev.vdev_id); 5780 rcu_read_unlock(); 5781 return; 5782 } 5783 5784 if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX) 5785 ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", 5786 roam_ev.reason, roam_ev.vdev_id); 5787 5788 switch (le32_to_cpu(roam_ev.reason)) { 5789 case WMI_ROAM_REASON_BEACON_MISS: 5790 /* TODO: Pending beacon miss and connection_loss_work 5791 * implementation 5792 * ath12k_mac_handle_beacon_miss(ar, vdev_id); 5793 */ 5794 break; 5795 case WMI_ROAM_REASON_BETTER_AP: 5796 case WMI_ROAM_REASON_LOW_RSSI: 5797 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 5798 case WMI_ROAM_REASON_HO_FAILED: 5799 ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", 5800 roam_ev.reason, roam_ev.vdev_id); 5801 break; 5802 } 5803 5804 rcu_read_unlock(); 5805 } 5806 5807 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 5808 { 5809 struct wmi_chan_info_event ch_info_ev = {0}; 5810 struct ath12k *ar; 5811 struct survey_info *survey; 5812 int idx; 5813 /* HW channel counters frequency value in hertz */ 5814 u32 cc_freq_hz = ab->cc_freq_hz; 5815 5816 if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { 5817 ath12k_warn(ab, "failed to extract chan info event"); 5818 return; 5819 } 5820 5821 ath12k_dbg(ab, ATH12K_DBG_WMI, 5822 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 5823 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 5824 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 5825 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 5826 ch_info_ev.mac_clk_mhz); 5827 5828 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { 5829 ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); 5830 return; 5831 } 5832 5833 rcu_read_lock(); 5834 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); 5835 if (!ar) { 5836 ath12k_warn(ab, "invalid vdev id in chan info ev %d", 5837 ch_info_ev.vdev_id); 5838 rcu_read_unlock(); 5839 return; 5840 } 5841 spin_lock_bh(&ar->data_lock); 5842 5843 switch (ar->scan.state) { 5844 case ATH12K_SCAN_IDLE: 5845 case ATH12K_SCAN_STARTING: 5846 ath12k_warn(ab, "received chan info event without a scan request, ignoring\n"); 5847 goto exit; 5848 case ATH12K_SCAN_RUNNING: 5849 case ATH12K_SCAN_ABORTING: 5850 break; 5851 } 5852 5853 idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq)); 5854 if (idx >= ARRAY_SIZE(ar->survey)) { 5855 ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", 5856 ch_info_ev.freq, idx); 5857 goto exit; 5858 } 5859 5860 /* If the FW provides the MAC clock frequency in MHz, override the 5861 * initialized HW channel counters frequency value. 5862 */ 5863 if (ch_info_ev.mac_clk_mhz) 5864 cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000); 5865 5866 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) { 5867 survey = &ar->survey[idx]; 5868 memset(survey, 0, sizeof(*survey)); 5869 survey->noise = le32_to_cpu(ch_info_ev.noise_floor); 5870 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 5871 SURVEY_INFO_TIME_BUSY; 5872 survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz); 5873 survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count), 5874 cc_freq_hz); 5875 } 5876 exit: 5877 spin_unlock_bh(&ar->data_lock); 5878
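/* The survey entries filled above are consumed by the mac80211 survey reporting path, hence the updates are done under ar->data_lock. */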
rcu_read_unlock(); 5879 } 5880 5881 static void 5882 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 5883 { 5884 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; 5885 struct survey_info *survey; 5886 struct ath12k *ar; 5887 u32 cc_freq_hz = ab->cc_freq_hz; 5888 u64 busy, total, tx, rx, rx_bss; 5889 int idx; 5890 5891 if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { 5892 ath12k_warn(ab, "failed to extract pdev bss chan info event"); 5893 return; 5894 } 5895 5896 busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 | 5897 le32_to_cpu(bss_ch_info_ev.rx_clear_count_low); 5898 5899 total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 | 5900 le32_to_cpu(bss_ch_info_ev.cycle_count_low); 5901 5902 tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 | 5903 le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low); 5904 5905 rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 | 5906 le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low); 5907 5908 rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 | 5909 le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low); 5910 5911 ath12k_dbg(ab, ATH12K_DBG_WMI, 5912 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 5913 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, 5914 bss_ch_info_ev.noise_floor, busy, total, 5915 tx, rx, rx_bss); 5916 5917 rcu_read_lock(); 5918 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id)); 5919 5920 if (!ar) { 5921 ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", 5922 bss_ch_info_ev.pdev_id); 5923 rcu_read_unlock(); 5924 return; 5925 } 5926 5927 spin_lock_bh(&ar->data_lock); 5928 idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq)); 5929 if (idx >= ARRAY_SIZE(ar->survey)) { 5930 ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 5931 bss_ch_info_ev.freq, idx); 5932 goto exit; 5933 } 5934 5935 survey = &ar->survey[idx]; 5936 5937 survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor); 5938 survey->time = div_u64(total, cc_freq_hz); 5939 survey->time_busy = div_u64(busy, cc_freq_hz); 5940 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 5941 survey->time_tx = div_u64(tx, cc_freq_hz); 5942 survey->filled |= (SURVEY_INFO_NOISE_DBM | 5943 SURVEY_INFO_TIME | 5944 SURVEY_INFO_TIME_BUSY | 5945 SURVEY_INFO_TIME_RX | 5946 SURVEY_INFO_TIME_TX); 5947 exit: 5948 spin_unlock_bh(&ar->data_lock); 5949 complete(&ar->bss_survey_done); 5950 5951 rcu_read_unlock(); 5952 } 5953 5954 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, 5955 struct sk_buff *skb) 5956 { 5957 struct wmi_vdev_install_key_complete_arg install_key_compl = {0}; 5958 struct ath12k *ar; 5959 5960 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 5961 ath12k_warn(ab, "failed to extract install key compl event"); 5962 return; 5963 } 5964 5965 ath12k_dbg(ab, ATH12K_DBG_WMI, 5966 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 5967 install_key_compl.key_idx, install_key_compl.key_flags, 5968 install_key_compl.macaddr, install_key_compl.status); 5969 5970 rcu_read_lock(); 5971 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 5972 if (!ar) { 5973 ath12k_warn(ab, "invalid vdev id in install key compl ev %d", 5974 install_key_compl.vdev_id); 5975 rcu_read_unlock(); 5976 return; 5977 } 5978 5979 ar->install_key_status = 0; 5980 5981 if (install_key_compl.status != 
WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 5982 ath12k_warn(ab, "install key failed for %pM status %d\n", 5983 install_key_compl.macaddr, install_key_compl.status); 5984 ar->install_key_status = install_key_compl.status; 5985 } 5986 5987 complete(&ar->install_key_done); 5988 rcu_read_unlock(); 5989 } 5990 5991 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab, 5992 u16 tag, u16 len, 5993 const void *ptr, 5994 void *data) 5995 { 5996 const struct wmi_service_available_event *ev; 5997 u32 *wmi_ext2_service_bitmap; 5998 int i, j; 5999 u16 expected_len; 6000 6001 expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32); 6002 if (len < expected_len) { 6003 ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n", 6004 len, tag); 6005 return -EINVAL; 6006 } 6007 6008 switch (tag) { 6009 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 6010 ev = (struct wmi_service_available_event *)ptr; 6011 for (i = 0, j = WMI_MAX_SERVICE; 6012 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 6013 i++) { 6014 do { 6015 if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & 6016 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 6017 set_bit(j, ab->wmi_ab.svc_map); 6018 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 6019 } 6020 6021 ath12k_dbg(ab, ATH12K_DBG_WMI, 6022 "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x", 6023 ev->wmi_service_segment_bitmap[0], 6024 ev->wmi_service_segment_bitmap[1], 6025 ev->wmi_service_segment_bitmap[2], 6026 ev->wmi_service_segment_bitmap[3]); 6027 break; 6028 case WMI_TAG_ARRAY_UINT32: 6029 wmi_ext2_service_bitmap = (u32 *)ptr; 6030 for (i = 0, j = WMI_MAX_EXT_SERVICE; 6031 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; 6032 i++) { 6033 do { 6034 if (wmi_ext2_service_bitmap[i] & 6035 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 6036 set_bit(j, ab->wmi_ab.svc_map); 6037 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 6038 } 6039 6040 ath12k_dbg(ab, ATH12K_DBG_WMI, 6041 "wmi_ext2_service_bitmap 0x%08x 0x%08x 0x%08x 0x%08x", 6042 wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], 6043 wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); 6044 break; 6045 } 6046 return 0; 6047 } 6048 6049 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb) 6050 { 6051 int ret; 6052 6053 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6054 ath12k_wmi_tlv_services_parser, 6055 NULL); 6056 return ret; 6057 } 6058 6059 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) 6060 { 6061 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0}; 6062 struct ath12k *ar; 6063 6064 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 6065 ath12k_warn(ab, "failed to extract peer assoc conf event"); 6066 return; 6067 } 6068 6069 ath12k_dbg(ab, ATH12K_DBG_WMI, 6070 "peer assoc conf ev vdev id %d macaddr %pM\n", 6071 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 6072 6073 rcu_read_lock(); 6074 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 6075 6076 if (!ar) { 6077 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 6078 peer_assoc_conf.vdev_id); 6079 rcu_read_unlock(); 6080 return; 6081 } 6082 6083 complete(&ar->peer_assoc_done); 6084 rcu_read_unlock(); 6085 } 6086 6087 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) 6088 { 6089 } 6090 6091 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned 6092 * is not part of the BDF CTL (Conformance Test Limits) table entries.
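 * The host only logs the condition here; the failsafe power limit itself is applied by the firmware.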
6093 */ 6094 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab, 6095 struct sk_buff *skb) 6096 { 6097 const void **tb; 6098 const struct wmi_pdev_ctl_failsafe_chk_event *ev; 6099 int ret; 6100 6101 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6102 if (IS_ERR(tb)) { 6103 ret = PTR_ERR(tb); 6104 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6105 return; 6106 } 6107 6108 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; 6109 if (!ev) { 6110 ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); 6111 kfree(tb); 6112 return; 6113 } 6114 6115 ath12k_dbg(ab, ATH12K_DBG_WMI, 6116 "pdev ctl failsafe check ev status %d\n", 6117 ev->ctl_failsafe_status); 6118 6119 /* If ctl_failsafe_status is set to 1, the FW caps the transmit power 6120 * at 10 dBm; otherwise the CTL power entry in the BDF is used. 6121 */ 6122 if (ev->ctl_failsafe_status != 0) 6123 ath12k_warn(ab, "pdev ctl failsafe failure status %d", 6124 ev->ctl_failsafe_status); 6125 6126 kfree(tb); 6127 } 6128 6129 static void 6130 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, 6131 const struct ath12k_wmi_pdev_csa_event *ev, 6132 const u32 *vdev_ids) 6133 { 6134 int i; 6135 struct ath12k_vif *arvif; 6136 6137 /* Finish CSA once the switch count reaches zero */ 6138 if (ev->current_switch_count) 6139 return; 6140 6141 rcu_read_lock(); 6142 for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) { 6143 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); 6144 6145 if (!arvif) { 6146 ath12k_warn(ab, "Received csa status for unknown vdev %d", 6147 vdev_ids[i]); 6148 continue; 6149 } 6150 6151 if (arvif->is_up && arvif->vif->bss_conf.csa_active) 6152 ieee80211_csa_finish(arvif->vif); 6153 } 6154 rcu_read_unlock(); 6155 } 6156 6157 static void 6158 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab, 6159 struct sk_buff *skb) 6160 { 6161 const void **tb; 6162 const struct ath12k_wmi_pdev_csa_event *ev; 6163 const u32 *vdev_ids; 6164 int ret; 6165 6166 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6167 if (IS_ERR(tb)) { 6168 ret = PTR_ERR(tb); 6169 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6170 return; 6171 } 6172 6173 ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; 6174 vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; 6175 6176 if (!ev || !vdev_ids) { 6177 ath12k_warn(ab, "failed to fetch pdev csa switch count ev"); 6178 kfree(tb); 6179 return; 6180 } 6181 6182 ath12k_dbg(ab, ATH12K_DBG_WMI, 6183 "pdev csa switch count %d for pdev %d, num_vdevs %d", 6184 ev->current_switch_count, ev->pdev_id, 6185 ev->num_vdevs); 6186 6187 ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); 6188 6189 kfree(tb); 6190 } 6191 6192 static void 6193 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) 6194 { 6195 const void **tb; 6196 const struct ath12k_wmi_pdev_radar_event *ev; 6197 struct ath12k *ar; 6198 int ret; 6199 6200 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6201 if (IS_ERR(tb)) { 6202 ret = PTR_ERR(tb); 6203 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6204 return; 6205 } 6206 6207 ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; 6208 6209 if (!ev) { 6210 ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev"); 6211 kfree(tb); 6212 return; 6213 } 6214 6215 ath12k_dbg(ab, ATH12K_DBG_WMI, 6216 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", 6217
ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, 6218 ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, 6219 ev->freq_offset, ev->sidx); 6220 6221 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); 6222 6223 if (!ar) { 6224 ath12k_warn(ab, "radar detected in invalid pdev %d\n", 6225 ev->pdev_id); 6226 goto exit; 6227 } 6228 6229 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", 6230 ev->pdev_id); 6231 6232 if (ar->dfs_block_radar_events) 6233 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); 6234 else 6235 ieee80211_radar_detected(ar->hw); 6236 6237 exit: 6238 kfree(tb); 6239 } 6240 6241 static void 6242 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, 6243 struct sk_buff *skb) 6244 { 6245 struct ath12k *ar; 6246 struct wmi_pdev_temperature_event ev = {0}; 6247 6248 if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { 6249 ath12k_warn(ab, "failed to extract pdev temperature event"); 6250 return; 6251 } 6252 6253 ath12k_dbg(ab, ATH12K_DBG_WMI, 6254 "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); 6255 6256 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); 6257 if (!ar) { 6258 ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); 6259 return; 6260 } 6261 } 6262 6263 static void ath12k_fils_discovery_event(struct ath12k_base *ab, 6264 struct sk_buff *skb) 6265 { 6266 const void **tb; 6267 const struct wmi_fils_discovery_event *ev; 6268 int ret; 6269 6270 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6271 if (IS_ERR(tb)) { 6272 ret = PTR_ERR(tb); 6273 ath12k_warn(ab, 6274 "failed to parse FILS discovery event tlv %d\n", 6275 ret); 6276 return; 6277 } 6278 6279 ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; 6280 if (!ev) { 6281 ath12k_warn(ab, "failed to fetch FILS discovery event\n"); 6282 kfree(tb); 6283 return; 6284 } 6285 6286 ath12k_warn(ab, 6287 "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", 6288 ev->vdev_id, ev->fils_tt, ev->tbtt); 6289 6290 kfree(tb); 6291 } 6292 6293 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, 6294 struct sk_buff *skb) 6295 { 6296 const void **tb; 6297 const struct wmi_probe_resp_tx_status_event *ev; 6298 int ret; 6299 6300 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6301 if (IS_ERR(tb)) { 6302 ret = PTR_ERR(tb); 6303 ath12k_warn(ab, 6304 "failed to parse probe response transmission status event tlv: %d\n", 6305 ret); 6306 return; 6307 } 6308 6309 ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; 6310 if (!ev) { 6311 ath12k_warn(ab, 6312 "failed to fetch probe response transmission status event"); 6313 kfree(tb); 6314 return; 6315 } 6316 6317 if (ev->tx_status) 6318 ath12k_warn(ab, 6319 "Probe response transmission failed for vdev_id %u, status %u\n", 6320 ev->vdev_id, ev->tx_status); 6321 6322 kfree(tb); 6323 } 6324 6325 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 6326 { 6327 struct wmi_cmd_hdr *cmd_hdr; 6328 enum wmi_tlv_event_id id; 6329 6330 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 6331 id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID); 6332 6333 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) 6334 goto out; 6335 6336 switch (id) { 6337 /* Process all the WMI events here */ 6338 case WMI_SERVICE_READY_EVENTID: 6339 ath12k_service_ready_event(ab, skb); 6340 break; 6341 case WMI_SERVICE_READY_EXT_EVENTID: 6342 ath12k_service_ready_ext_event(ab, skb); 6343 
break; 6344 case WMI_SERVICE_READY_EXT2_EVENTID: 6345 ath12k_service_ready_ext2_event(ab, skb); 6346 break; 6347 case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: 6348 ath12k_reg_chan_list_event(ab, skb); 6349 break; 6350 case WMI_READY_EVENTID: 6351 ath12k_ready_event(ab, skb); 6352 break; 6353 case WMI_PEER_DELETE_RESP_EVENTID: 6354 ath12k_peer_delete_resp_event(ab, skb); 6355 break; 6356 case WMI_VDEV_START_RESP_EVENTID: 6357 ath12k_vdev_start_resp_event(ab, skb); 6358 break; 6359 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: 6360 ath12k_bcn_tx_status_event(ab, skb); 6361 break; 6362 case WMI_VDEV_STOPPED_EVENTID: 6363 ath12k_vdev_stopped_event(ab, skb); 6364 break; 6365 case WMI_MGMT_RX_EVENTID: 6366 ath12k_mgmt_rx_event(ab, skb); 6367 /* mgmt_rx_event() owns the skb now! */ 6368 return; 6369 case WMI_MGMT_TX_COMPLETION_EVENTID: 6370 ath12k_mgmt_tx_compl_event(ab, skb); 6371 break; 6372 case WMI_SCAN_EVENTID: 6373 ath12k_scan_event(ab, skb); 6374 break; 6375 case WMI_PEER_STA_KICKOUT_EVENTID: 6376 ath12k_peer_sta_kickout_event(ab, skb); 6377 break; 6378 case WMI_ROAM_EVENTID: 6379 ath12k_roam_event(ab, skb); 6380 break; 6381 case WMI_CHAN_INFO_EVENTID: 6382 ath12k_chan_info_event(ab, skb); 6383 break; 6384 case WMI_PDEV_BSS_CHAN_INFO_EVENTID: 6385 ath12k_pdev_bss_chan_info_event(ab, skb); 6386 break; 6387 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 6388 ath12k_vdev_install_key_compl_event(ab, skb); 6389 break; 6390 case WMI_SERVICE_AVAILABLE_EVENTID: 6391 ath12k_service_available_event(ab, skb); 6392 break; 6393 case WMI_PEER_ASSOC_CONF_EVENTID: 6394 ath12k_peer_assoc_conf_event(ab, skb); 6395 break; 6396 case WMI_UPDATE_STATS_EVENTID: 6397 ath12k_update_stats_event(ab, skb); 6398 break; 6399 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: 6400 ath12k_pdev_ctl_failsafe_check_event(ab, skb); 6401 break; 6402 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: 6403 ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb); 6404 break; 6405 case WMI_PDEV_TEMPERATURE_EVENTID: 6406 ath12k_wmi_pdev_temperature_event(ab, skb); 6407 break; 6408 case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: 6409 ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb); 6410 break; 6411 case WMI_HOST_FILS_DISCOVERY_EVENTID: 6412 ath12k_fils_discovery_event(ab, skb); 6413 break; 6414 case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: 6415 ath12k_probe_resp_tx_status_event(ab, skb); 6416 break; 6417 /* add Unsupported events here */ 6418 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 6419 case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 6420 case WMI_TWT_ENABLE_EVENTID: 6421 case WMI_TWT_DISABLE_EVENTID: 6422 case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 6423 ath12k_dbg(ab, ATH12K_DBG_WMI, 6424 "ignoring unsupported event 0x%x\n", id); 6425 break; 6426 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: 6427 ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); 6428 break; 6429 case WMI_VDEV_DELETE_RESP_EVENTID: 6430 ath12k_vdev_delete_resp_event(ab, skb); 6431 break; 6432 /* TODO: Add remaining events */ 6433 default: 6434 ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); 6435 break; 6436 } 6437 6438 out: 6439 dev_kfree_skb(skb); 6440 } 6441 6442 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, 6443 u32 pdev_idx) 6444 { 6445 int status; 6446 u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL, 6447 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, 6448 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; 6449 struct ath12k_htc_svc_conn_req conn_req = {}; 6450 struct ath12k_htc_svc_conn_resp conn_resp = {}; 6451 6452 /* these fields are the same for all service endpoints */ 6453 
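/* pdev 0/1/2 connect to the WMI_CONTROL, WMI_CONTROL_MAC1 and WMI_CONTROL_MAC2 HTC services respectively (see svc_id[] above); only the service id below differs per pdev. */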
conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete; 6454 conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx; 6455 conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits; 6456 6457 /* connect to control service */ 6458 conn_req.service_id = svc_id[pdev_idx]; 6459 6460 status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); 6461 if (status) { 6462 ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", 6463 status); 6464 return status; 6465 } 6466 6467 ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; 6468 ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; 6469 ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; 6470 6471 return 0; 6472 } 6473 6474 static int 6475 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, 6476 struct wmi_unit_test_cmd ut_cmd, 6477 u32 *test_args) 6478 { 6479 struct ath12k_wmi_pdev *wmi = ar->wmi; 6480 struct wmi_unit_test_cmd *cmd; 6481 struct sk_buff *skb; 6482 struct wmi_tlv *tlv; 6483 void *ptr; 6484 u32 *ut_cmd_args; 6485 int buf_len, arg_len; 6486 int ret; 6487 int i; 6488 6489 arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args); 6490 buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; 6491 6492 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 6493 if (!skb) 6494 return -ENOMEM; 6495 6496 cmd = (struct wmi_unit_test_cmd *)skb->data; 6497 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, 6498 sizeof(ut_cmd)); 6499 6500 cmd->vdev_id = ut_cmd.vdev_id; 6501 cmd->module_id = ut_cmd.module_id; 6502 cmd->num_args = ut_cmd.num_args; 6503 cmd->diag_token = ut_cmd.diag_token; 6504 6505 ptr = skb->data + sizeof(ut_cmd); 6506 6507 tlv = ptr; 6508 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 6509 6510 ptr += TLV_HDR_SIZE; 6511 6512 ut_cmd_args = ptr; 6513 for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++) 6514 ut_cmd_args[i] = test_args[i]; 6515 6516 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 6517 "WMI unit test : module %d vdev %d n_args %d token %d\n", 6518 cmd->module_id, cmd->vdev_id, cmd->num_args, 6519 cmd->diag_token); 6520 6521 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 6522 6523 if (ret) { 6524 ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", 6525 ret); 6526 dev_kfree_skb(skb); 6527 } 6528 6529 return ret; 6530 } 6531 6532 int ath12k_wmi_simulate_radar(struct ath12k *ar) 6533 { 6534 struct ath12k_vif *arvif; 6535 u32 dfs_args[DFS_MAX_TEST_ARGS]; 6536 struct wmi_unit_test_cmd wmi_ut; 6537 bool arvif_found = false; 6538 6539 list_for_each_entry(arvif, &ar->arvifs, list) { 6540 if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { 6541 arvif_found = true; 6542 break; 6543 } 6544 } 6545 6546 if (!arvif_found) 6547 return -EINVAL; 6548 6549 dfs_args[DFS_TEST_CMDID] = 0; 6550 dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 6551 /* Currently we could pass segment_id(b0 - b1), chirp(b2) 6552 * freq offset (b3 - b10) to unit test. For simulation 6553 * purpose this can be set to 0 which is valid. 
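 * For example, segment id 1 with the chirp bit set would encode as (1 | BIT(2)) = 0x5.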
6554 */ 6555 dfs_args[DFS_TEST_RADAR_PARAM] = 0; 6556 6557 wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id); 6558 wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE); 6559 wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS); 6560 wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN); 6561 6562 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 6563 6564 return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 6565 } 6566 6567 int ath12k_wmi_connect(struct ath12k_base *ab) 6568 { 6569 u32 i; 6570 u8 wmi_ep_count; 6571 6572 wmi_ep_count = ab->htc.wmi_ep_count; 6573 if (wmi_ep_count > ab->hw_params->max_radios) 6574 return -EINVAL; 6575 6576 for (i = 0; i < wmi_ep_count; i++) 6577 ath12k_connect_pdev_htc_service(ab, i); 6578 6579 return 0; 6580 } 6581 6582 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id) 6583 { 6584 if (WARN_ON(pdev_id >= MAX_RADIOS)) 6585 return; 6586 6587 /* TODO: Deinit any pdev specific wmi resource */ 6588 } 6589 6590 int ath12k_wmi_pdev_attach(struct ath12k_base *ab, 6591 u8 pdev_id) 6592 { 6593 struct ath12k_wmi_pdev *wmi_handle; 6594 6595 if (pdev_id >= ab->hw_params->max_radios) 6596 return -EINVAL; 6597 6598 wmi_handle = &ab->wmi_ab.wmi[pdev_id]; 6599 6600 wmi_handle->wmi_ab = &ab->wmi_ab; 6601 6602 ab->wmi_ab.ab = ab; 6603 /* TODO: Init remaining resource specific to pdev */ 6604 6605 return 0; 6606 } 6607 6608 int ath12k_wmi_attach(struct ath12k_base *ab) 6609 { 6610 int ret; 6611 6612 ret = ath12k_wmi_pdev_attach(ab, 0); 6613 if (ret) 6614 return ret; 6615 6616 ab->wmi_ab.ab = ab; 6617 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; 6618 6619 /* This is overwritten when the service ready ext event is handled */ 6620 if (ab->hw_params->single_pdev_only) 6621 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; 6622 6623 /* TODO: Init remaining wmi soc resources required */ 6624 init_completion(&ab->wmi_ab.service_ready); 6625 init_completion(&ab->wmi_ab.unified_ready); 6626 6627 return 0; 6628 } 6629 6630 void ath12k_wmi_detach(struct ath12k_base *ab) 6631 { 6632 int i; 6633 6634 /* TODO: Deinit wmi resource specific to SOC as required */ 6635 6636 for (i = 0; i < ab->htc.wmi_ep_count; i++) 6637 ath12k_wmi_pdev_detach(ab, i); 6638 6639 ath12k_wmi_free_dbring_caps(ab); 6640 } 6641