1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 5 */ 6 #include <linux/skbuff.h> 7 #include <linux/ctype.h> 8 #include <net/mac80211.h> 9 #include <net/cfg80211.h> 10 #include <linux/completion.h> 11 #include <linux/if_ether.h> 12 #include <linux/types.h> 13 #include <linux/pci.h> 14 #include <linux/uuid.h> 15 #include <linux/time.h> 16 #include <linux/of.h> 17 #include "core.h" 18 #include "debug.h" 19 #include "mac.h" 20 #include "hw.h" 21 #include "peer.h" 22 #include "testmode.h" 23 #include "p2p.h" 24 25 struct wmi_tlv_policy { 26 size_t min_len; 27 }; 28 29 struct wmi_tlv_svc_ready_parse { 30 bool wmi_svc_bitmap_done; 31 }; 32 33 struct wmi_tlv_dma_ring_caps_parse { 34 struct wmi_dma_ring_capabilities *dma_ring_caps; 35 u32 n_dma_ring_caps; 36 }; 37 38 struct wmi_tlv_svc_rdy_ext_parse { 39 struct ath11k_service_ext_param param; 40 struct wmi_soc_mac_phy_hw_mode_caps *hw_caps; 41 struct wmi_hw_mode_capabilities *hw_mode_caps; 42 u32 n_hw_mode_caps; 43 u32 tot_phy_id; 44 struct wmi_hw_mode_capabilities pref_hw_mode_caps; 45 struct wmi_mac_phy_capabilities *mac_phy_caps; 46 u32 n_mac_phy_caps; 47 struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps; 48 struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps; 49 u32 n_ext_hal_reg_caps; 50 struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; 51 bool hw_mode_done; 52 bool mac_phy_done; 53 bool ext_hal_reg_done; 54 bool mac_phy_chainmask_combo_done; 55 bool mac_phy_chainmask_cap_done; 56 bool oem_dma_ring_cap_done; 57 bool dma_ring_cap_done; 58 }; 59 60 struct wmi_tlv_svc_rdy_ext2_parse { 61 struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; 62 bool dma_ring_cap_done; 63 }; 64 65 struct wmi_tlv_rdy_parse { 66 u32 num_extra_mac_addr; 67 }; 68 69 struct wmi_tlv_dma_buf_release_parse { 70 struct ath11k_wmi_dma_buf_release_fixed_param fixed; 71 struct wmi_dma_buf_release_entry 
*buf_entry; 72 struct wmi_dma_buf_release_meta_data *meta_data; 73 u32 num_buf_entry; 74 u32 num_meta; 75 bool buf_entry_done; 76 bool meta_data_done; 77 }; 78 79 struct wmi_tlv_fw_stats_parse { 80 const struct wmi_stats_event *ev; 81 const struct wmi_per_chain_rssi_stats *rssi; 82 struct ath11k_fw_stats *stats; 83 int rssi_num; 84 bool chain_rssi_done; 85 }; 86 87 struct wmi_tlv_mgmt_rx_parse { 88 const struct wmi_mgmt_rx_hdr *fixed; 89 const u8 *frame_buf; 90 bool frame_buf_done; 91 }; 92 93 static const struct wmi_tlv_policy wmi_tlv_policies[] = { 94 [WMI_TAG_ARRAY_BYTE] 95 = { .min_len = 0 }, 96 [WMI_TAG_ARRAY_UINT32] 97 = { .min_len = 0 }, 98 [WMI_TAG_SERVICE_READY_EVENT] 99 = { .min_len = sizeof(struct wmi_service_ready_event) }, 100 [WMI_TAG_SERVICE_READY_EXT_EVENT] 101 = { .min_len = sizeof(struct wmi_service_ready_ext_event) }, 102 [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] 103 = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) }, 104 [WMI_TAG_SOC_HAL_REG_CAPABILITIES] 105 = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) }, 106 [WMI_TAG_VDEV_START_RESPONSE_EVENT] 107 = { .min_len = sizeof(struct wmi_vdev_start_resp_event) }, 108 [WMI_TAG_PEER_DELETE_RESP_EVENT] 109 = { .min_len = sizeof(struct wmi_peer_delete_resp_event) }, 110 [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] 111 = { .min_len = sizeof(struct wmi_bcn_tx_status_event) }, 112 [WMI_TAG_VDEV_STOPPED_EVENT] 113 = { .min_len = sizeof(struct wmi_vdev_stopped_event) }, 114 [WMI_TAG_REG_CHAN_LIST_CC_EVENT] 115 = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) }, 116 [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] 117 = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) }, 118 [WMI_TAG_MGMT_RX_HDR] 119 = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) }, 120 [WMI_TAG_MGMT_TX_COMPL_EVENT] 121 = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) }, 122 [WMI_TAG_SCAN_EVENT] 123 = { .min_len = sizeof(struct wmi_scan_event) }, 124 [WMI_TAG_PEER_STA_KICKOUT_EVENT] 125 = { .min_len = sizeof(struct 
wmi_peer_sta_kickout_event) }, 126 [WMI_TAG_ROAM_EVENT] 127 = { .min_len = sizeof(struct wmi_roam_event) }, 128 [WMI_TAG_CHAN_INFO_EVENT] 129 = { .min_len = sizeof(struct wmi_chan_info_event) }, 130 [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] 131 = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) }, 132 [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] 133 = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) }, 134 [WMI_TAG_READY_EVENT] = { 135 .min_len = sizeof(struct wmi_ready_event_min) }, 136 [WMI_TAG_SERVICE_AVAILABLE_EVENT] 137 = {.min_len = sizeof(struct wmi_service_available_event) }, 138 [WMI_TAG_PEER_ASSOC_CONF_EVENT] 139 = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, 140 [WMI_TAG_STATS_EVENT] 141 = { .min_len = sizeof(struct wmi_stats_event) }, 142 [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] 143 = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, 144 [WMI_TAG_HOST_SWFDA_EVENT] = { 145 .min_len = sizeof(struct wmi_fils_discovery_event) }, 146 [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = { 147 .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, 148 [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { 149 .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, 150 [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { 151 .min_len = sizeof(struct wmi_obss_color_collision_event) }, 152 [WMI_TAG_11D_NEW_COUNTRY_EVENT] = { 153 .min_len = sizeof(struct wmi_11d_new_cc_ev) }, 154 [WMI_TAG_PER_CHAIN_RSSI_STATS] = { 155 .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, 156 [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = { 157 .min_len = sizeof(struct wmi_twt_add_dialog_event) }, 158 [WMI_TAG_P2P_NOA_INFO] = { 159 .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) }, 160 [WMI_TAG_P2P_NOA_EVENT] = { 161 .min_len = sizeof(struct wmi_p2p_noa_event) }, 162 }; 163 164 #define PRIMAP(_hw_mode_) \ 165 [_hw_mode_] = _hw_mode_##_PRI 166 167 static const int ath11k_hw_mode_pri_map[] = { 168 PRIMAP(WMI_HOST_HW_MODE_SINGLE), 169 PRIMAP(WMI_HOST_HW_MODE_DBS), 170 
PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE), 171 PRIMAP(WMI_HOST_HW_MODE_SBS), 172 PRIMAP(WMI_HOST_HW_MODE_DBS_SBS), 173 PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS), 174 /* keep last */ 175 PRIMAP(WMI_HOST_HW_MODE_MAX), 176 }; 177 178 static int 179 ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 180 int (*iter)(struct ath11k_base *ab, u16 tag, u16 len, 181 const void *ptr, void *data), 182 void *data) 183 { 184 const void *begin = ptr; 185 const struct wmi_tlv *tlv; 186 u16 tlv_tag, tlv_len; 187 int ret; 188 189 while (len > 0) { 190 if (len < sizeof(*tlv)) { 191 ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 192 ptr - begin, len, sizeof(*tlv)); 193 return -EINVAL; 194 } 195 196 tlv = ptr; 197 tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header); 198 tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header); 199 ptr += sizeof(*tlv); 200 len -= sizeof(*tlv); 201 202 if (tlv_len > len) { 203 ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 204 tlv_tag, ptr - begin, len, tlv_len); 205 return -EINVAL; 206 } 207 208 if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && 209 wmi_tlv_policies[tlv_tag].min_len && 210 wmi_tlv_policies[tlv_tag].min_len > tlv_len) { 211 ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", 212 tlv_tag, ptr - begin, tlv_len, 213 wmi_tlv_policies[tlv_tag].min_len); 214 return -EINVAL; 215 } 216 217 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 218 if (ret) 219 return ret; 220 221 ptr += tlv_len; 222 len -= tlv_len; 223 } 224 225 return 0; 226 } 227 228 static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len, 229 const void *ptr, void *data) 230 { 231 const void **tb = data; 232 233 if (tag < WMI_TAG_MAX) 234 tb[tag] = ptr; 235 236 return 0; 237 } 238 239 static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb, 240 const void *ptr, size_t len) 241 { 242 return ath11k_wmi_tlv_iter(ar, ptr, len, 
ath11k_wmi_tlv_iter_parse, 243 (void *)tb); 244 } 245 246 const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, 247 struct sk_buff *skb, gfp_t gfp) 248 { 249 const void **tb; 250 int ret; 251 252 tb = kzalloc_objs(*tb, WMI_TAG_MAX, gfp); 253 if (!tb) 254 return ERR_PTR(-ENOMEM); 255 256 ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len); 257 if (ret) { 258 kfree(tb); 259 return ERR_PTR(ret); 260 } 261 262 return tb; 263 } 264 265 static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, 266 u32 cmd_id) 267 { 268 struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); 269 struct ath11k_base *ab = wmi->wmi_ab->ab; 270 struct wmi_cmd_hdr *cmd_hdr; 271 int ret; 272 u32 cmd = 0; 273 274 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 275 return -ENOMEM; 276 277 cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id); 278 279 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 280 cmd_hdr->cmd_id = cmd; 281 282 trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len); 283 284 memset(skb_cb, 0, sizeof(*skb_cb)); 285 ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); 286 287 if (ret) 288 goto err_pull; 289 290 return 0; 291 292 err_pull: 293 skb_pull(skb, sizeof(struct wmi_cmd_hdr)); 294 return ret; 295 } 296 297 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, 298 u32 cmd_id) 299 { 300 struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; 301 int ret = -EOPNOTSUPP; 302 struct ath11k_base *ab = wmi_ab->ab; 303 304 might_sleep(); 305 306 if (ab->hw_params.credit_flow) { 307 wait_event_timeout(wmi_ab->tx_credits_wq, ({ 308 ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); 309 310 if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, 311 &wmi_ab->ab->dev_flags)) 312 ret = -ESHUTDOWN; 313 314 (ret != -EAGAIN); 315 }), WMI_SEND_TIMEOUT_HZ); 316 } else { 317 wait_event_timeout(wmi->tx_ce_desc_wq, ({ 318 ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); 319 320 if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, 321 &wmi_ab->ab->dev_flags)) 322 ret = 
-ESHUTDOWN; 323 324 (ret != -ENOBUFS); 325 }), WMI_SEND_TIMEOUT_HZ); 326 } 327 328 if (ret == -EAGAIN) 329 ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); 330 331 if (ret == -ENOBUFS) 332 ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", 333 cmd_id); 334 335 return ret; 336 } 337 338 static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, 339 const void *ptr, 340 struct ath11k_service_ext_param *param) 341 { 342 const struct wmi_service_ready_ext_event *ev = ptr; 343 344 if (!ev) 345 return -EINVAL; 346 347 /* Move this to host based bitmap */ 348 param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits; 349 param->default_fw_config_bits = ev->default_fw_config_bits; 350 param->he_cap_info = ev->he_cap_info; 351 param->mpdu_density = ev->mpdu_density; 352 param->max_bssid_rx_filters = ev->max_bssid_rx_filters; 353 memcpy(¶m->ppet, &ev->ppet, sizeof(param->ppet)); 354 355 return 0; 356 } 357 358 static int 359 ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, 360 struct wmi_soc_mac_phy_hw_mode_caps *hw_caps, 361 struct wmi_hw_mode_capabilities *wmi_hw_mode_caps, 362 struct wmi_soc_hal_reg_capabilities *hal_reg_caps, 363 struct wmi_mac_phy_capabilities *wmi_mac_phy_caps, 364 u8 hw_mode_id, u8 phy_id, 365 struct ath11k_pdev *pdev) 366 { 367 struct wmi_mac_phy_capabilities *mac_phy_caps; 368 struct ath11k_base *ab = wmi_handle->wmi_ab->ab; 369 struct ath11k_band_cap *cap_band; 370 struct ath11k_pdev_cap *pdev_cap = &pdev->cap; 371 u32 phy_map; 372 u32 hw_idx, phy_idx = 0; 373 374 if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps) 375 return -EINVAL; 376 377 for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) { 378 if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id) 379 break; 380 381 phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map; 382 while (phy_map) { 383 phy_map >>= 1; 384 phy_idx++; 385 } 386 } 387 388 if (hw_idx == hw_caps->num_hw_modes) 389 return -EINVAL; 390 391 
phy_idx += phy_id; 392 if (phy_id >= hal_reg_caps->num_phy) 393 return -EINVAL; 394 395 mac_phy_caps = wmi_mac_phy_caps + phy_idx; 396 397 pdev->pdev_id = mac_phy_caps->pdev_id; 398 pdev_cap->supported_bands |= mac_phy_caps->supported_bands; 399 pdev_cap->ampdu_density = mac_phy_caps->ampdu_density; 400 ab->target_pdev_ids[ab->target_pdev_count].supported_bands = 401 mac_phy_caps->supported_bands; 402 ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; 403 ab->target_pdev_count++; 404 405 if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) && 406 !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP)) 407 return -EINVAL; 408 409 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from 410 * band to band for a single radio, need to see how this should be 411 * handled. 412 */ 413 if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { 414 pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; 415 pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; 416 } 417 418 if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { 419 pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; 420 pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; 421 pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; 422 pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g; 423 pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g; 424 pdev_cap->nss_ratio_enabled = 425 WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); 426 pdev_cap->nss_ratio_info = 427 WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); 428 } 429 430 /* tx/rx chainmask reported from fw depends on the actual hw chains used, 431 * For example, for 4x4 capable macphys, first 4 chains can be used for first 432 * mac and the remaining 4 chains can be used for the second mac or vice-versa. 433 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 434 * will be advertised for second mac or vice-versa. 
Compute the shift value 435 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to 436 * mac80211. 437 */ 438 pdev_cap->tx_chain_mask_shift = 439 find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); 440 pdev_cap->rx_chain_mask_shift = 441 find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); 442 443 if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { 444 cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; 445 cap_band->phy_id = mac_phy_caps->phy_id; 446 cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g; 447 cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g; 448 cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g; 449 cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext; 450 cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g; 451 memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g, 452 sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); 453 memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g, 454 sizeof(struct ath11k_ppe_threshold)); 455 } 456 457 if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { 458 cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; 459 cap_band->phy_id = mac_phy_caps->phy_id; 460 cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; 461 cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; 462 cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; 463 cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; 464 cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; 465 memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, 466 sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); 467 memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, 468 sizeof(struct ath11k_ppe_threshold)); 469 470 cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; 471 cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; 472 cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; 473 cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; 474 cap_band->he_cap_info[1] = 
mac_phy_caps->he_cap_info_5g_ext; 475 cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; 476 memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, 477 sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); 478 memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, 479 sizeof(struct ath11k_ppe_threshold)); 480 } 481 482 return 0; 483 } 484 485 static int 486 ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle, 487 struct wmi_soc_hal_reg_capabilities *reg_caps, 488 struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap, 489 u8 phy_idx, 490 struct ath11k_hal_reg_capabilities_ext *param) 491 { 492 struct wmi_hal_reg_capabilities_ext *ext_reg_cap; 493 494 if (!reg_caps || !wmi_ext_reg_cap) 495 return -EINVAL; 496 497 if (phy_idx >= reg_caps->num_phy) 498 return -EINVAL; 499 500 ext_reg_cap = &wmi_ext_reg_cap[phy_idx]; 501 502 param->phy_id = ext_reg_cap->phy_id; 503 param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain; 504 param->eeprom_reg_domain_ext = 505 ext_reg_cap->eeprom_reg_domain_ext; 506 param->regcap1 = ext_reg_cap->regcap1; 507 param->regcap2 = ext_reg_cap->regcap2; 508 /* check if param->wireless_mode is needed */ 509 param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan; 510 param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan; 511 param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan; 512 param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan; 513 514 return 0; 515 } 516 517 static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, 518 const void *evt_buf, 519 struct ath11k_targ_cap *cap) 520 { 521 const struct wmi_service_ready_event *ev = evt_buf; 522 523 if (!ev) { 524 ath11k_err(ab, "%s: failed by NULL param\n", 525 __func__); 526 return -EINVAL; 527 } 528 529 cap->phy_capability = ev->phy_capability; 530 cap->max_frag_entry = ev->max_frag_entry; 531 cap->num_rf_chains = ev->num_rf_chains; 532 cap->ht_cap_info = ev->ht_cap_info; 533 cap->vht_cap_info = ev->vht_cap_info; 534 cap->vht_supp_mcs = ev->vht_supp_mcs; 535 cap->hw_min_tx_power = 
ev->hw_min_tx_power; 536 cap->hw_max_tx_power = ev->hw_max_tx_power; 537 cap->sys_cap_info = ev->sys_cap_info; 538 cap->min_pkt_size_enable = ev->min_pkt_size_enable; 539 cap->max_bcn_ie_size = ev->max_bcn_ie_size; 540 cap->max_num_scan_channels = ev->max_num_scan_channels; 541 cap->max_supported_macs = ev->max_supported_macs; 542 cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; 543 cap->txrx_chainmask = ev->txrx_chainmask; 544 cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; 545 cap->num_msdu_desc = ev->num_msdu_desc; 546 547 return 0; 548 } 549 550 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in 551 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each 552 * 4-byte word. 553 */ 554 static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi, 555 const u32 *wmi_svc_bm) 556 { 557 int i, j; 558 559 for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) { 560 do { 561 if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32)) 562 set_bit(j, wmi->wmi_ab->svc_map); 563 } while (++j % WMI_SERVICE_BITS_IN_SIZE32); 564 } 565 } 566 567 static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, 568 const void *ptr, void *data) 569 { 570 struct wmi_tlv_svc_ready_parse *svc_ready = data; 571 struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0]; 572 u16 expect_len; 573 574 switch (tag) { 575 case WMI_TAG_SERVICE_READY_EVENT: 576 if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) 577 return -EINVAL; 578 break; 579 580 case WMI_TAG_ARRAY_UINT32: 581 if (!svc_ready->wmi_svc_bitmap_done) { 582 expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32); 583 if (len < expect_len) { 584 ath11k_warn(ab, "invalid len %d for the tag 0x%x\n", 585 len, tag); 586 return -EINVAL; 587 } 588 589 ath11k_wmi_service_bitmap_copy(wmi_handle, ptr); 590 591 svc_ready->wmi_svc_bitmap_done = true; 592 } 593 break; 594 default: 595 break; 596 } 597 598 return 0; 599 } 600 601 static int 
ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb) 602 { 603 struct wmi_tlv_svc_ready_parse svc_ready = { }; 604 int ret; 605 606 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 607 ath11k_wmi_tlv_svc_rdy_parse, 608 &svc_ready); 609 if (ret) { 610 ath11k_warn(ab, "failed to parse tlv %d\n", ret); 611 return ret; 612 } 613 614 ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready"); 615 616 return 0; 617 } 618 619 struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) 620 { 621 struct sk_buff *skb; 622 struct ath11k_base *ab = wmi_ab->ab; 623 u32 round_len = roundup(len, 4); 624 625 skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); 626 if (!skb) 627 return NULL; 628 629 skb_reserve(skb, WMI_SKB_HEADROOM); 630 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 631 ath11k_warn(ab, "unaligned WMI skb data\n"); 632 633 skb_put(skb, round_len); 634 memset(skb->data, 0, round_len); 635 636 return skb; 637 } 638 639 static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar, 640 struct ieee80211_tx_info *info) 641 { 642 struct ath11k_base *ab = ar->ab; 643 u32 freq = 0; 644 645 if (ab->hw_params.support_off_channel_tx && 646 ar->scan.is_roc && 647 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) 648 freq = ar->scan.roc_freq; 649 650 return freq; 651 } 652 653 int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, 654 struct sk_buff *frame, bool tx_params_valid) 655 { 656 struct ath11k_pdev_wmi *wmi = ar->wmi; 657 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); 658 struct wmi_mgmt_send_cmd *cmd; 659 struct wmi_mgmt_send_params *params; 660 struct wmi_tlv *frame_tlv; 661 struct sk_buff *skb; 662 u32 buf_len; 663 int ret, len; 664 665 buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ? 
666 frame->len : WMI_MGMT_SEND_DOWNLD_LEN; 667 668 len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4); 669 if (tx_params_valid) 670 len += sizeof(*params); 671 672 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 673 if (!skb) 674 return -ENOMEM; 675 676 cmd = (struct wmi_mgmt_send_cmd *)skb->data; 677 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) | 678 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 679 cmd->vdev_id = vdev_id; 680 cmd->desc_id = buf_id; 681 cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info); 682 cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr); 683 cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr); 684 cmd->frame_len = frame->len; 685 cmd->buf_len = buf_len; 686 cmd->tx_params_valid = !!tx_params_valid; 687 688 frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 689 frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 690 FIELD_PREP(WMI_TLV_LEN, buf_len); 691 692 memcpy(frame_tlv->value, frame->data, buf_len); 693 694 ath11k_ce_byte_swap(frame_tlv->value, buf_len); 695 696 if (tx_params_valid) { 697 params = 698 (struct wmi_mgmt_send_params *)(skb->data + (len - sizeof(*params))); 699 params->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TX_SEND_PARAMS) | 700 FIELD_PREP(WMI_TLV_LEN, 701 sizeof(*params) - TLV_HDR_SIZE); 702 params->tx_params_dword1 |= WMI_TX_PARAMS_DWORD1_CFR_CAPTURE; 703 } 704 705 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID); 706 if (ret) { 707 ath11k_warn(ar->ab, 708 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); 709 dev_kfree_skb(skb); 710 } 711 712 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send"); 713 714 return ret; 715 } 716 717 int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, 718 struct vdev_create_params *param) 719 { 720 struct ath11k_pdev_wmi *wmi = ar->wmi; 721 struct wmi_vdev_create_cmd *cmd; 722 struct sk_buff *skb; 723 struct wmi_vdev_txrx_streams *txrx_streams; 724 struct wmi_tlv *tlv; 725 int ret, len; 726 void 
*ptr; 727 728 /* It can be optimized my sending tx/rx chain configuration 729 * only for supported bands instead of always sending it for 730 * both the bands. 731 */ 732 len = sizeof(*cmd) + TLV_HDR_SIZE + 733 (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); 734 735 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 736 if (!skb) 737 return -ENOMEM; 738 739 cmd = (struct wmi_vdev_create_cmd *)skb->data; 740 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) | 741 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 742 743 cmd->vdev_id = param->if_id; 744 cmd->vdev_type = param->type; 745 cmd->vdev_subtype = param->subtype; 746 cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX; 747 cmd->pdev_id = param->pdev_id; 748 cmd->mbssid_flags = param->mbssid_flags; 749 cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id; 750 751 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); 752 753 ptr = skb->data + sizeof(*cmd); 754 len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); 755 756 tlv = ptr; 757 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 758 FIELD_PREP(WMI_TLV_LEN, len); 759 760 ptr += TLV_HDR_SIZE; 761 txrx_streams = ptr; 762 len = sizeof(*txrx_streams); 763 txrx_streams->tlv_header = 764 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | 765 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 766 txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; 767 txrx_streams->supported_tx_streams = 768 param->chains[NL80211_BAND_2GHZ].tx; 769 txrx_streams->supported_rx_streams = 770 param->chains[NL80211_BAND_2GHZ].rx; 771 772 txrx_streams++; 773 txrx_streams->tlv_header = 774 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | 775 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 776 txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; 777 txrx_streams->supported_tx_streams = 778 param->chains[NL80211_BAND_5GHZ].tx; 779 txrx_streams->supported_rx_streams = 780 param->chains[NL80211_BAND_5GHZ].rx; 781 782 ret = ath11k_wmi_cmd_send(wmi, skb, 
WMI_VDEV_CREATE_CMDID); 783 if (ret) { 784 ath11k_warn(ar->ab, 785 "failed to submit WMI_VDEV_CREATE_CMDID\n"); 786 dev_kfree_skb(skb); 787 } 788 789 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 790 "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n", 791 param->if_id, param->type, param->subtype, 792 macaddr, param->pdev_id); 793 794 return ret; 795 } 796 797 int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id) 798 { 799 struct ath11k_pdev_wmi *wmi = ar->wmi; 800 struct wmi_vdev_delete_cmd *cmd; 801 struct sk_buff *skb; 802 int ret; 803 804 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 805 if (!skb) 806 return -ENOMEM; 807 808 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 809 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) | 810 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 811 cmd->vdev_id = vdev_id; 812 813 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID); 814 if (ret) { 815 ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n"); 816 dev_kfree_skb(skb); 817 } 818 819 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id); 820 821 return ret; 822 } 823 824 int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id) 825 { 826 struct ath11k_pdev_wmi *wmi = ar->wmi; 827 struct wmi_vdev_stop_cmd *cmd; 828 struct sk_buff *skb; 829 int ret; 830 831 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 832 if (!skb) 833 return -ENOMEM; 834 835 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 836 837 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) | 838 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 839 cmd->vdev_id = vdev_id; 840 841 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID); 842 if (ret) { 843 ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n"); 844 dev_kfree_skb(skb); 845 } 846 847 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id); 848 849 return ret; 850 } 851 852 int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) 853 { 
854 struct ath11k_pdev_wmi *wmi = ar->wmi; 855 struct wmi_vdev_down_cmd *cmd; 856 struct sk_buff *skb; 857 int ret; 858 859 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 860 if (!skb) 861 return -ENOMEM; 862 863 cmd = (struct wmi_vdev_down_cmd *)skb->data; 864 865 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) | 866 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 867 cmd->vdev_id = vdev_id; 868 869 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID); 870 if (ret) { 871 ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n"); 872 dev_kfree_skb(skb); 873 } 874 875 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id); 876 877 return ret; 878 } 879 880 static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, 881 struct wmi_vdev_start_req_arg *arg) 882 { 883 u32 center_freq1 = arg->channel.band_center_freq1; 884 885 memset(chan, 0, sizeof(*chan)); 886 887 chan->mhz = arg->channel.freq; 888 chan->band_center_freq1 = arg->channel.band_center_freq1; 889 890 if (arg->channel.mode == MODE_11AX_HE160) { 891 if (arg->channel.freq > arg->channel.band_center_freq1) 892 chan->band_center_freq1 = center_freq1 + 40; 893 else 894 chan->band_center_freq1 = center_freq1 - 40; 895 896 chan->band_center_freq2 = arg->channel.band_center_freq1; 897 898 } else if ((arg->channel.mode == MODE_11AC_VHT80_80) || 899 (arg->channel.mode == MODE_11AX_HE80_80)) { 900 chan->band_center_freq2 = arg->channel.band_center_freq2; 901 } else { 902 chan->band_center_freq2 = 0; 903 } 904 905 chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode); 906 if (arg->channel.passive) 907 chan->info |= WMI_CHAN_INFO_PASSIVE; 908 if (arg->channel.allow_ibss) 909 chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED; 910 if (arg->channel.allow_ht) 911 chan->info |= WMI_CHAN_INFO_ALLOW_HT; 912 if (arg->channel.allow_vht) 913 chan->info |= WMI_CHAN_INFO_ALLOW_VHT; 914 if (arg->channel.allow_he) 915 chan->info |= WMI_CHAN_INFO_ALLOW_HE; 916 if 
(arg->channel.ht40plus) 917 chan->info |= WMI_CHAN_INFO_HT40_PLUS; 918 if (arg->channel.chan_radar) 919 chan->info |= WMI_CHAN_INFO_DFS; 920 if (arg->channel.freq2_radar) 921 chan->info |= WMI_CHAN_INFO_DFS_FREQ2; 922 923 chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, 924 arg->channel.max_power) | 925 FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, 926 arg->channel.max_reg_power); 927 928 chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, 929 arg->channel.max_antenna_gain) | 930 FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, 931 arg->channel.max_power); 932 } 933 934 int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, 935 bool restart) 936 { 937 struct ath11k_pdev_wmi *wmi = ar->wmi; 938 struct wmi_vdev_start_request_cmd *cmd; 939 struct sk_buff *skb; 940 struct wmi_channel *chan; 941 struct wmi_tlv *tlv; 942 void *ptr; 943 int ret, len; 944 945 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 946 return -EINVAL; 947 948 len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; 949 950 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 951 if (!skb) 952 return -ENOMEM; 953 954 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 955 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 956 WMI_TAG_VDEV_START_REQUEST_CMD) | 957 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 958 cmd->vdev_id = arg->vdev_id; 959 cmd->beacon_interval = arg->bcn_intval; 960 cmd->bcn_tx_rate = arg->bcn_tx_rate; 961 cmd->dtim_period = arg->dtim_period; 962 cmd->num_noa_descriptors = arg->num_noa_descriptors; 963 cmd->preferred_rx_streams = arg->pref_rx_streams; 964 cmd->preferred_tx_streams = arg->pref_tx_streams; 965 cmd->cac_duration_ms = arg->cac_duration_ms; 966 cmd->regdomain = arg->regdomain; 967 cmd->he_ops = arg->he_ops; 968 cmd->mbssid_flags = arg->mbssid_flags; 969 cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id; 970 971 if (!restart) { 972 if (arg->ssid) { 973 cmd->ssid.ssid_len = arg->ssid_len; 974 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 975 
} 976 if (arg->hidden_ssid) 977 cmd->flags |= WMI_VDEV_START_HIDDEN_SSID; 978 if (arg->pmf_enabled) 979 cmd->flags |= WMI_VDEV_START_PMF_ENABLED; 980 } 981 982 cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED; 983 if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) 984 cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED; 985 986 ptr = skb->data + sizeof(*cmd); 987 chan = ptr; 988 989 ath11k_wmi_put_wmi_channel(chan, arg); 990 991 chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | 992 FIELD_PREP(WMI_TLV_LEN, 993 sizeof(*chan) - TLV_HDR_SIZE); 994 ptr += sizeof(*chan); 995 996 tlv = ptr; 997 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 998 FIELD_PREP(WMI_TLV_LEN, 0); 999 1000 /* Note: This is a nested TLV containing: 1001 * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv].. 1002 */ 1003 1004 ptr += sizeof(*tlv); 1005 1006 if (restart) 1007 ret = ath11k_wmi_cmd_send(wmi, skb, 1008 WMI_VDEV_RESTART_REQUEST_CMDID); 1009 else 1010 ret = ath11k_wmi_cmd_send(wmi, skb, 1011 WMI_VDEV_START_REQUEST_CMDID); 1012 if (ret) { 1013 ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n", 1014 restart ? "restart" : "start"); 1015 dev_kfree_skb(skb); 1016 } 1017 1018 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n", 1019 restart ? 
"restart" : "start", arg->vdev_id, 1020 arg->channel.freq, arg->channel.mode); 1021 1022 return ret; 1023 } 1024 1025 int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid, 1026 u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt) 1027 { 1028 struct ath11k_pdev_wmi *wmi = ar->wmi; 1029 struct wmi_vdev_up_cmd *cmd; 1030 struct ieee80211_bss_conf *bss_conf; 1031 struct ath11k_vif *arvif; 1032 struct sk_buff *skb; 1033 int ret; 1034 1035 arvif = ath11k_mac_get_arvif(ar, vdev_id); 1036 1037 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1038 if (!skb) 1039 return -ENOMEM; 1040 1041 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1042 1043 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) | 1044 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1045 cmd->vdev_id = vdev_id; 1046 cmd->vdev_assoc_id = aid; 1047 1048 ether_addr_copy(cmd->vdev_bssid.addr, bssid); 1049 1050 cmd->nontx_profile_idx = nontx_profile_idx; 1051 cmd->nontx_profile_cnt = nontx_profile_cnt; 1052 if (tx_bssid) 1053 ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid); 1054 1055 if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) { 1056 bss_conf = &arvif->vif->bss_conf; 1057 1058 if (bss_conf->nontransmitted) { 1059 ether_addr_copy(cmd->tx_vdev_bssid.addr, 1060 bss_conf->transmitter_bssid); 1061 cmd->nontx_profile_idx = bss_conf->bssid_index; 1062 cmd->nontx_profile_cnt = bss_conf->bssid_indicator; 1063 } 1064 } 1065 1066 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1067 if (ret) { 1068 ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1069 dev_kfree_skb(skb); 1070 } 1071 1072 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1073 "cmd vdev up id 0x%x assoc id %d bssid %pM\n", 1074 vdev_id, aid, bssid); 1075 1076 return ret; 1077 } 1078 1079 int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, 1080 struct peer_create_params *param) 1081 { 1082 struct ath11k_pdev_wmi *wmi = ar->wmi; 1083 struct wmi_peer_create_cmd *cmd; 1084 struct sk_buff 
*skb; 1085 int ret; 1086 1087 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1088 if (!skb) 1089 return -ENOMEM; 1090 1091 cmd = (struct wmi_peer_create_cmd *)skb->data; 1092 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) | 1093 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1094 1095 ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr); 1096 cmd->peer_type = param->peer_type; 1097 cmd->vdev_id = param->vdev_id; 1098 1099 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1100 if (ret) { 1101 ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); 1102 dev_kfree_skb(skb); 1103 } 1104 1105 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1106 "cmd peer create vdev_id %d peer_addr %pM\n", 1107 param->vdev_id, param->peer_addr); 1108 1109 return ret; 1110 } 1111 1112 int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, 1113 const u8 *peer_addr, u8 vdev_id) 1114 { 1115 struct ath11k_pdev_wmi *wmi = ar->wmi; 1116 struct wmi_peer_delete_cmd *cmd; 1117 struct sk_buff *skb; 1118 int ret; 1119 1120 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1121 if (!skb) 1122 return -ENOMEM; 1123 1124 cmd = (struct wmi_peer_delete_cmd *)skb->data; 1125 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) | 1126 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1127 1128 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1129 cmd->vdev_id = vdev_id; 1130 1131 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); 1132 if (ret) { 1133 ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); 1134 dev_kfree_skb(skb); 1135 } 1136 1137 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1138 "cmd peer delete vdev_id %d peer_addr %pM\n", 1139 vdev_id, peer_addr); 1140 1141 return ret; 1142 } 1143 1144 int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, 1145 struct pdev_set_regdomain_params *param) 1146 { 1147 struct ath11k_pdev_wmi *wmi = ar->wmi; 1148 struct wmi_pdev_set_regdomain_cmd *cmd; 1149 struct sk_buff *skb; 1150 
int ret; 1151 1152 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1153 if (!skb) 1154 return -ENOMEM; 1155 1156 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 1157 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1158 WMI_TAG_PDEV_SET_REGDOMAIN_CMD) | 1159 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1160 1161 cmd->reg_domain = param->current_rd_in_use; 1162 cmd->reg_domain_2g = param->current_rd_2g; 1163 cmd->reg_domain_5g = param->current_rd_5g; 1164 cmd->conformance_test_limit_2g = param->ctl_2g; 1165 cmd->conformance_test_limit_5g = param->ctl_5g; 1166 cmd->dfs_domain = param->dfs_domain; 1167 cmd->pdev_id = param->pdev_id; 1168 1169 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); 1170 if (ret) { 1171 ath11k_warn(ar->ab, 1172 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n"); 1173 dev_kfree_skb(skb); 1174 } 1175 1176 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1177 "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", 1178 param->current_rd_in_use, param->current_rd_2g, 1179 param->current_rd_5g, param->dfs_domain, param->pdev_id); 1180 1181 return ret; 1182 } 1183 1184 int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, 1185 u32 vdev_id, u32 param_id, u32 param_val) 1186 { 1187 struct ath11k_pdev_wmi *wmi = ar->wmi; 1188 struct wmi_peer_set_param_cmd *cmd; 1189 struct sk_buff *skb; 1190 int ret; 1191 1192 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1193 if (!skb) 1194 return -ENOMEM; 1195 1196 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 1197 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) | 1198 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1199 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1200 cmd->vdev_id = vdev_id; 1201 cmd->param_id = param_id; 1202 cmd->param_value = param_val; 1203 1204 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); 1205 if (ret) { 1206 ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); 1207 
dev_kfree_skb(skb); 1208 } 1209 1210 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1211 "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n", 1212 vdev_id, peer_addr, param_id, param_val); 1213 1214 return ret; 1215 } 1216 1217 int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, 1218 u8 peer_addr[ETH_ALEN], 1219 struct peer_flush_params *param) 1220 { 1221 struct ath11k_pdev_wmi *wmi = ar->wmi; 1222 struct wmi_peer_flush_tids_cmd *cmd; 1223 struct sk_buff *skb; 1224 int ret; 1225 1226 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1227 if (!skb) 1228 return -ENOMEM; 1229 1230 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 1231 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) | 1232 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1233 1234 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1235 cmd->peer_tid_bitmap = param->peer_tid_bitmap; 1236 cmd->vdev_id = param->vdev_id; 1237 1238 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); 1239 if (ret) { 1240 ath11k_warn(ar->ab, 1241 "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); 1242 dev_kfree_skb(skb); 1243 } 1244 1245 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1246 "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n", 1247 param->vdev_id, peer_addr, param->peer_tid_bitmap); 1248 1249 return ret; 1250 } 1251 1252 int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, 1253 int vdev_id, const u8 *addr, 1254 dma_addr_t paddr, u8 tid, 1255 u8 ba_window_size_valid, 1256 u32 ba_window_size) 1257 { 1258 struct wmi_peer_reorder_queue_setup_cmd *cmd; 1259 struct sk_buff *skb; 1260 int ret; 1261 1262 skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 1263 if (!skb) 1264 return -ENOMEM; 1265 1266 cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; 1267 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1268 WMI_TAG_REORDER_QUEUE_SETUP_CMD) | 1269 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1270 1271 ether_addr_copy(cmd->peer_macaddr.addr, addr); 
1272 cmd->vdev_id = vdev_id; 1273 cmd->tid = tid; 1274 cmd->queue_ptr_lo = lower_32_bits(paddr); 1275 cmd->queue_ptr_hi = upper_32_bits(paddr); 1276 cmd->queue_no = tid; 1277 cmd->ba_window_size_valid = ba_window_size_valid; 1278 cmd->ba_window_size = ba_window_size; 1279 1280 ret = ath11k_wmi_cmd_send(ar->wmi, skb, 1281 WMI_PEER_REORDER_QUEUE_SETUP_CMDID); 1282 if (ret) { 1283 ath11k_warn(ar->ab, 1284 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n"); 1285 dev_kfree_skb(skb); 1286 } 1287 1288 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1289 "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n", 1290 addr, vdev_id, tid); 1291 1292 return ret; 1293 } 1294 1295 int 1296 ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, 1297 struct rx_reorder_queue_remove_params *param) 1298 { 1299 struct ath11k_pdev_wmi *wmi = ar->wmi; 1300 struct wmi_peer_reorder_queue_remove_cmd *cmd; 1301 struct sk_buff *skb; 1302 int ret; 1303 1304 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1305 if (!skb) 1306 return -ENOMEM; 1307 1308 cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; 1309 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1310 WMI_TAG_REORDER_QUEUE_REMOVE_CMD) | 1311 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1312 1313 ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr); 1314 cmd->vdev_id = param->vdev_id; 1315 cmd->tid_mask = param->peer_tid_bitmap; 1316 1317 ret = ath11k_wmi_cmd_send(wmi, skb, 1318 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); 1319 if (ret) { 1320 ath11k_warn(ar->ab, 1321 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); 1322 dev_kfree_skb(skb); 1323 } 1324 1325 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1326 "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d", 1327 param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); 1328 1329 return ret; 1330 } 1331 1332 int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, 1333 u32 param_value, u8 pdev_id) 1334 { 1335 struct ath11k_pdev_wmi *wmi = ar->wmi; 1336 
struct wmi_pdev_set_param_cmd *cmd; 1337 struct sk_buff *skb; 1338 int ret; 1339 1340 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1341 if (!skb) 1342 return -ENOMEM; 1343 1344 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 1345 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) | 1346 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1347 cmd->pdev_id = pdev_id; 1348 cmd->param_id = param_id; 1349 cmd->param_value = param_value; 1350 1351 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID); 1352 if (ret) { 1353 ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); 1354 dev_kfree_skb(skb); 1355 } 1356 1357 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1358 "cmd pdev set param %d pdev id %d value %d\n", 1359 param_id, pdev_id, param_value); 1360 1361 return ret; 1362 } 1363 1364 int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, 1365 enum wmi_sta_ps_mode psmode) 1366 { 1367 struct ath11k_pdev_wmi *wmi = ar->wmi; 1368 struct wmi_pdev_set_ps_mode_cmd *cmd; 1369 struct sk_buff *skb; 1370 int ret; 1371 1372 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1373 if (!skb) 1374 return -ENOMEM; 1375 1376 cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data; 1377 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) | 1378 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1379 cmd->vdev_id = vdev_id; 1380 cmd->sta_ps_mode = psmode; 1381 1382 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); 1383 if (ret) { 1384 ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); 1385 dev_kfree_skb(skb); 1386 } 1387 1388 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1389 "cmd sta powersave mode psmode %d vdev id %d\n", 1390 psmode, vdev_id); 1391 1392 return ret; 1393 } 1394 1395 int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, 1396 u32 pdev_id) 1397 { 1398 struct ath11k_pdev_wmi *wmi = ar->wmi; 1399 struct wmi_pdev_suspend_cmd *cmd; 1400 struct sk_buff *skb; 1401 int ret; 
1402 1403 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1404 if (!skb) 1405 return -ENOMEM; 1406 1407 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 1408 1409 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) | 1410 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1411 1412 cmd->suspend_opt = suspend_opt; 1413 cmd->pdev_id = pdev_id; 1414 1415 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID); 1416 if (ret) { 1417 ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n"); 1418 dev_kfree_skb(skb); 1419 } 1420 1421 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1422 "cmd pdev suspend pdev_id %d\n", pdev_id); 1423 1424 return ret; 1425 } 1426 1427 int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id) 1428 { 1429 struct ath11k_pdev_wmi *wmi = ar->wmi; 1430 struct wmi_pdev_resume_cmd *cmd; 1431 struct sk_buff *skb; 1432 int ret; 1433 1434 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1435 if (!skb) 1436 return -ENOMEM; 1437 1438 cmd = (struct wmi_pdev_resume_cmd *)skb->data; 1439 1440 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) | 1441 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1442 cmd->pdev_id = pdev_id; 1443 1444 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); 1445 if (ret) { 1446 ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); 1447 dev_kfree_skb(skb); 1448 } 1449 1450 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1451 "cmd pdev resume pdev id %d\n", pdev_id); 1452 1453 return ret; 1454 } 1455 1456 /* TODO FW Support for the cmd is not available yet. 
1457 * Can be tested once the command and corresponding 1458 * event is implemented in FW 1459 */ 1460 int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, 1461 enum wmi_bss_chan_info_req_type type) 1462 { 1463 struct ath11k_pdev_wmi *wmi = ar->wmi; 1464 struct wmi_pdev_bss_chan_info_req_cmd *cmd; 1465 struct sk_buff *skb; 1466 int ret; 1467 1468 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1469 if (!skb) 1470 return -ENOMEM; 1471 1472 cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; 1473 1474 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1475 WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) | 1476 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1477 cmd->req_type = type; 1478 cmd->pdev_id = ar->pdev->pdev_id; 1479 1480 ret = ath11k_wmi_cmd_send(wmi, skb, 1481 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); 1482 if (ret) { 1483 ath11k_warn(ar->ab, 1484 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); 1485 dev_kfree_skb(skb); 1486 } 1487 1488 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1489 "cmd pdev bss chan info request type %d\n", type); 1490 1491 return ret; 1492 } 1493 1494 int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, 1495 struct ap_ps_params *param) 1496 { 1497 struct ath11k_pdev_wmi *wmi = ar->wmi; 1498 struct wmi_ap_ps_peer_cmd *cmd; 1499 struct sk_buff *skb; 1500 int ret; 1501 1502 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1503 if (!skb) 1504 return -ENOMEM; 1505 1506 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; 1507 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) | 1508 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1509 1510 cmd->vdev_id = param->vdev_id; 1511 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1512 cmd->param = param->param; 1513 cmd->value = param->value; 1514 1515 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); 1516 if (ret) { 1517 ath11k_warn(ar->ab, 1518 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); 1519 dev_kfree_skb(skb); 1520 } 1521 1522 
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1523 "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n", 1524 param->vdev_id, peer_addr, param->param, param->value); 1525 1526 return ret; 1527 } 1528 1529 int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, 1530 u32 param, u32 param_value) 1531 { 1532 struct ath11k_pdev_wmi *wmi = ar->wmi; 1533 struct wmi_sta_powersave_param_cmd *cmd; 1534 struct sk_buff *skb; 1535 int ret; 1536 1537 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1538 if (!skb) 1539 return -ENOMEM; 1540 1541 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 1542 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1543 WMI_TAG_STA_POWERSAVE_PARAM_CMD) | 1544 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1545 1546 cmd->vdev_id = vdev_id; 1547 cmd->param = param; 1548 cmd->value = param_value; 1549 1550 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); 1551 if (ret) { 1552 ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); 1553 dev_kfree_skb(skb); 1554 } 1555 1556 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1557 "cmd set powersave param vdev_id %d param %d value %d\n", 1558 vdev_id, param, param_value); 1559 1560 return ret; 1561 } 1562 1563 int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms) 1564 { 1565 struct ath11k_pdev_wmi *wmi = ar->wmi; 1566 struct wmi_force_fw_hang_cmd *cmd; 1567 struct sk_buff *skb; 1568 int ret, len; 1569 1570 len = sizeof(*cmd); 1571 1572 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 1573 if (!skb) 1574 return -ENOMEM; 1575 1576 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 1577 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) | 1578 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 1579 1580 cmd->type = type; 1581 cmd->delay_time_ms = delay_time_ms; 1582 1583 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); 1584 1585 if (ret) { 1586 ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); 1587 
dev_kfree_skb(skb); 1588 } 1589 1590 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang"); 1591 1592 return ret; 1593 } 1594 1595 int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, 1596 u32 param_id, u32 param_value) 1597 { 1598 struct ath11k_pdev_wmi *wmi = ar->wmi; 1599 struct wmi_vdev_set_param_cmd *cmd; 1600 struct sk_buff *skb; 1601 int ret; 1602 1603 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1604 if (!skb) 1605 return -ENOMEM; 1606 1607 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 1608 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) | 1609 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1610 1611 cmd->vdev_id = vdev_id; 1612 cmd->param_id = param_id; 1613 cmd->param_value = param_value; 1614 1615 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); 1616 if (ret) { 1617 ath11k_warn(ar->ab, 1618 "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); 1619 dev_kfree_skb(skb); 1620 } 1621 1622 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1623 "cmd vdev set param vdev 0x%x param %d value %d\n", 1624 vdev_id, param_id, param_value); 1625 1626 return ret; 1627 } 1628 1629 int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, 1630 struct stats_request_params *param) 1631 { 1632 struct ath11k_pdev_wmi *wmi = ar->wmi; 1633 struct wmi_request_stats_cmd *cmd; 1634 struct sk_buff *skb; 1635 int ret; 1636 1637 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1638 if (!skb) 1639 return -ENOMEM; 1640 1641 cmd = (struct wmi_request_stats_cmd *)skb->data; 1642 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) | 1643 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1644 1645 cmd->stats_id = param->stats_id; 1646 cmd->vdev_id = param->vdev_id; 1647 cmd->pdev_id = param->pdev_id; 1648 1649 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID); 1650 if (ret) { 1651 ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); 1652 dev_kfree_skb(skb); 1653 } 1654 1655 ath11k_dbg(ar->ab, 
ATH11K_DBG_WMI, 1656 "cmd request stats 0x%x vdev id %d pdev id %d\n", 1657 param->stats_id, param->vdev_id, param->pdev_id); 1658 1659 return ret; 1660 } 1661 1662 int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar) 1663 { 1664 struct ath11k_pdev_wmi *wmi = ar->wmi; 1665 struct wmi_get_pdev_temperature_cmd *cmd; 1666 struct sk_buff *skb; 1667 int ret; 1668 1669 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1670 if (!skb) 1671 return -ENOMEM; 1672 1673 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; 1674 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) | 1675 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1676 cmd->pdev_id = ar->pdev->pdev_id; 1677 1678 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); 1679 if (ret) { 1680 ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); 1681 dev_kfree_skb(skb); 1682 } 1683 1684 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1685 "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); 1686 1687 return ret; 1688 } 1689 1690 int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, 1691 u32 vdev_id, u32 bcn_ctrl_op) 1692 { 1693 struct ath11k_pdev_wmi *wmi = ar->wmi; 1694 struct wmi_bcn_offload_ctrl_cmd *cmd; 1695 struct sk_buff *skb; 1696 int ret; 1697 1698 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1699 if (!skb) 1700 return -ENOMEM; 1701 1702 cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; 1703 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1704 WMI_TAG_BCN_OFFLOAD_CTRL_CMD) | 1705 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1706 1707 cmd->vdev_id = vdev_id; 1708 cmd->bcn_ctrl_op = bcn_ctrl_op; 1709 1710 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); 1711 if (ret) { 1712 ath11k_warn(ar->ab, 1713 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); 1714 dev_kfree_skb(skb); 1715 } 1716 1717 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1718 "cmd bcn offload ctrl vdev id %d ctrl_op %d\n", 1719 vdev_id, 
bcn_ctrl_op); 1720 1721 return ret; 1722 } 1723 1724 int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id, 1725 const u8 *p2p_ie) 1726 { 1727 struct ath11k_pdev_wmi *wmi = ar->wmi; 1728 struct wmi_p2p_go_set_beacon_ie_cmd *cmd; 1729 size_t p2p_ie_len, aligned_len; 1730 struct wmi_tlv *tlv; 1731 struct sk_buff *skb; 1732 int ret, len; 1733 1734 p2p_ie_len = p2p_ie[1] + 2; 1735 aligned_len = roundup(p2p_ie_len, 4); 1736 1737 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; 1738 1739 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 1740 if (!skb) 1741 return -ENOMEM; 1742 1743 cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data; 1744 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) | 1745 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1746 cmd->vdev_id = vdev_id; 1747 cmd->ie_buf_len = p2p_ie_len; 1748 1749 tlv = (struct wmi_tlv *)cmd->tlv; 1750 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 1751 FIELD_PREP(WMI_TLV_LEN, aligned_len); 1752 memcpy(tlv->value, p2p_ie, p2p_ie_len); 1753 1754 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); 1755 if (ret) { 1756 ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); 1757 dev_kfree_skb(skb); 1758 } 1759 1760 return ret; 1761 } 1762 1763 int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, 1764 struct ieee80211_mutable_offsets *offs, 1765 struct sk_buff *bcn, u32 ema_params) 1766 { 1767 struct ath11k_pdev_wmi *wmi = ar->wmi; 1768 struct wmi_bcn_tmpl_cmd *cmd; 1769 struct wmi_bcn_prb_info *bcn_prb_info; 1770 struct wmi_tlv *tlv; 1771 struct sk_buff *skb; 1772 void *ptr; 1773 int ret, len; 1774 size_t aligned_len = roundup(bcn->len, 4); 1775 struct ieee80211_vif *vif; 1776 struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id); 1777 1778 if (!arvif) { 1779 ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id); 1780 return -EINVAL; 1781 } 1782 1783 vif = arvif->vif; 1784 1785 len = sizeof(*cmd) + sizeof(*bcn_prb_info) + 
TLV_HDR_SIZE + aligned_len; 1786 1787 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 1788 if (!skb) 1789 return -ENOMEM; 1790 1791 cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; 1792 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) | 1793 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1794 cmd->vdev_id = vdev_id; 1795 cmd->tim_ie_offset = offs->tim_offset; 1796 1797 if (vif->bss_conf.csa_active) { 1798 cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; 1799 cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; 1800 } 1801 1802 cmd->buf_len = bcn->len; 1803 cmd->mbssid_ie_offset = offs->mbssid_off; 1804 cmd->ema_params = ema_params; 1805 1806 ptr = skb->data + sizeof(*cmd); 1807 1808 bcn_prb_info = ptr; 1809 len = sizeof(*bcn_prb_info); 1810 bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, 1811 WMI_TAG_BCN_PRB_INFO) | 1812 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 1813 bcn_prb_info->caps = 0; 1814 bcn_prb_info->erp = 0; 1815 1816 ptr += sizeof(*bcn_prb_info); 1817 1818 tlv = ptr; 1819 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 1820 FIELD_PREP(WMI_TLV_LEN, aligned_len); 1821 memcpy(tlv->value, bcn->data, bcn->len); 1822 1823 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); 1824 if (ret) { 1825 ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); 1826 dev_kfree_skb(skb); 1827 } 1828 1829 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl"); 1830 1831 return ret; 1832 } 1833 1834 int ath11k_wmi_vdev_install_key(struct ath11k *ar, 1835 struct wmi_vdev_install_key_arg *arg) 1836 { 1837 struct ath11k_pdev_wmi *wmi = ar->wmi; 1838 struct wmi_vdev_install_key_cmd *cmd; 1839 struct wmi_tlv *tlv; 1840 struct sk_buff *skb; 1841 int ret, len; 1842 int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t)); 1843 1844 len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; 1845 1846 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 1847 if (!skb) 1848 return -ENOMEM; 1849 1850 cmd = (struct 
wmi_vdev_install_key_cmd *)skb->data; 1851 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) | 1852 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 1853 cmd->vdev_id = arg->vdev_id; 1854 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 1855 cmd->key_idx = arg->key_idx; 1856 cmd->key_flags = arg->key_flags; 1857 cmd->key_cipher = arg->key_cipher; 1858 cmd->key_len = arg->key_len; 1859 cmd->key_txmic_len = arg->key_txmic_len; 1860 cmd->key_rxmic_len = arg->key_rxmic_len; 1861 1862 if (arg->key_rsc_counter) 1863 memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter, 1864 sizeof(struct wmi_key_seq_counter)); 1865 1866 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 1867 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 1868 FIELD_PREP(WMI_TLV_LEN, key_len_aligned); 1869 if (arg->key_data) 1870 memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); 1871 1872 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); 1873 if (ret) { 1874 ath11k_warn(ar->ab, 1875 "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); 1876 dev_kfree_skb(skb); 1877 } 1878 1879 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 1880 "cmd vdev install key idx %d cipher %d len %d\n", 1881 arg->key_idx, arg->key_cipher, arg->key_len); 1882 1883 return ret; 1884 } 1885 1886 static inline void 1887 ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, 1888 struct peer_assoc_params *param, 1889 bool hw_crypto_disabled) 1890 { 1891 cmd->peer_flags = 0; 1892 1893 if (param->is_wme_set) { 1894 if (param->qos_flag) 1895 cmd->peer_flags |= WMI_PEER_QOS; 1896 if (param->apsd_flag) 1897 cmd->peer_flags |= WMI_PEER_APSD; 1898 if (param->ht_flag) 1899 cmd->peer_flags |= WMI_PEER_HT; 1900 if (param->bw_40) 1901 cmd->peer_flags |= WMI_PEER_40MHZ; 1902 if (param->bw_80) 1903 cmd->peer_flags |= WMI_PEER_80MHZ; 1904 if (param->bw_160) 1905 cmd->peer_flags |= WMI_PEER_160MHZ; 1906 1907 /* Typically if STBC is enabled for VHT it should be enabled 1908 * for HT as well 
1909 **/ 1910 if (param->stbc_flag) 1911 cmd->peer_flags |= WMI_PEER_STBC; 1912 1913 /* Typically if LDPC is enabled for VHT it should be enabled 1914 * for HT as well 1915 **/ 1916 if (param->ldpc_flag) 1917 cmd->peer_flags |= WMI_PEER_LDPC; 1918 1919 if (param->static_mimops_flag) 1920 cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; 1921 if (param->dynamic_mimops_flag) 1922 cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; 1923 if (param->spatial_mux_flag) 1924 cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; 1925 if (param->vht_flag) 1926 cmd->peer_flags |= WMI_PEER_VHT; 1927 if (param->he_flag) 1928 cmd->peer_flags |= WMI_PEER_HE; 1929 if (param->twt_requester) 1930 cmd->peer_flags |= WMI_PEER_TWT_REQ; 1931 if (param->twt_responder) 1932 cmd->peer_flags |= WMI_PEER_TWT_RESP; 1933 } 1934 1935 /* Suppress authorization for all AUTH modes that need 4-way handshake 1936 * (during re-association). 1937 * Authorization will be done for these modes on key installation. 1938 */ 1939 if (param->auth_flag) 1940 cmd->peer_flags |= WMI_PEER_AUTH; 1941 if (param->need_ptk_4_way) { 1942 cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; 1943 if (!hw_crypto_disabled && param->is_assoc) 1944 cmd->peer_flags &= ~WMI_PEER_AUTH; 1945 } 1946 if (param->need_gtk_2_way) 1947 cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; 1948 /* safe mode bypass the 4-way handshake */ 1949 if (param->safe_mode_enabled) 1950 cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY | 1951 WMI_PEER_NEED_GTK_2_WAY); 1952 1953 if (param->is_pmf_enabled) 1954 cmd->peer_flags |= WMI_PEER_PMF; 1955 1956 /* Disable AMSDU for station transmit, if user configures it */ 1957 /* Disable AMSDU for AP transmit to 11n Stations, if user configures 1958 * it 1959 * if (param->amsdu_disable) Add after FW support 1960 **/ 1961 1962 /* Target asserts if node is marked HT and all MCS is set to 0. 
1963 * Mark the node as non-HT if all the mcs rates are disabled through 1964 * iwpriv 1965 **/ 1966 if (param->peer_ht_rates.num_rates == 0) 1967 cmd->peer_flags &= ~WMI_PEER_HT; 1968 } 1969 1970 int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, 1971 struct peer_assoc_params *param) 1972 { 1973 struct ath11k_pdev_wmi *wmi = ar->wmi; 1974 struct wmi_peer_assoc_complete_cmd *cmd; 1975 struct wmi_vht_rate_set *mcs; 1976 struct wmi_he_rate_set *he_mcs; 1977 struct sk_buff *skb; 1978 struct wmi_tlv *tlv; 1979 void *ptr; 1980 u32 peer_legacy_rates_align; 1981 u32 peer_ht_rates_align; 1982 int i, ret, len; 1983 1984 peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates, 1985 sizeof(u32)); 1986 peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates, 1987 sizeof(u32)); 1988 1989 len = sizeof(*cmd) + 1990 TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + 1991 TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + 1992 sizeof(*mcs) + TLV_HDR_SIZE + 1993 (sizeof(*he_mcs) * param->peer_he_mcs_count); 1994 1995 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 1996 if (!skb) 1997 return -ENOMEM; 1998 1999 ptr = skb->data; 2000 2001 cmd = ptr; 2002 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 2003 WMI_TAG_PEER_ASSOC_COMPLETE_CMD) | 2004 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2005 2006 cmd->vdev_id = param->vdev_id; 2007 2008 cmd->peer_new_assoc = param->peer_new_assoc; 2009 cmd->peer_associd = param->peer_associd; 2010 2011 ath11k_wmi_copy_peer_flags(cmd, param, 2012 test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, 2013 &ar->ab->dev_flags)); 2014 2015 ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac); 2016 2017 cmd->peer_rate_caps = param->peer_rate_caps; 2018 cmd->peer_caps = param->peer_caps; 2019 cmd->peer_listen_intval = param->peer_listen_intval; 2020 cmd->peer_ht_caps = param->peer_ht_caps; 2021 cmd->peer_max_mpdu = param->peer_max_mpdu; 2022 cmd->peer_mpdu_density = param->peer_mpdu_density; 2023 cmd->peer_vht_caps = 
param->peer_vht_caps; 2024 cmd->peer_phymode = param->peer_phymode; 2025 2026 /* Update 11ax capabilities */ 2027 cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0]; 2028 cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1]; 2029 cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal; 2030 cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz; 2031 cmd->peer_he_ops = param->peer_he_ops; 2032 memcpy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo, 2033 sizeof(param->peer_he_cap_phyinfo)); 2034 memcpy(&cmd->peer_ppet, ¶m->peer_ppet, 2035 sizeof(param->peer_ppet)); 2036 2037 /* Update peer legacy rate information */ 2038 ptr += sizeof(*cmd); 2039 2040 tlv = ptr; 2041 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 2042 FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align); 2043 2044 ptr += TLV_HDR_SIZE; 2045 2046 cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; 2047 memcpy(ptr, param->peer_legacy_rates.rates, 2048 param->peer_legacy_rates.num_rates); 2049 2050 /* Update peer HT rate information */ 2051 ptr += peer_legacy_rates_align; 2052 2053 tlv = ptr; 2054 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 2055 FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align); 2056 ptr += TLV_HDR_SIZE; 2057 cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; 2058 memcpy(ptr, param->peer_ht_rates.rates, 2059 param->peer_ht_rates.num_rates); 2060 2061 /* VHT Rates */ 2062 ptr += peer_ht_rates_align; 2063 2064 mcs = ptr; 2065 2066 mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) | 2067 FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE); 2068 2069 cmd->peer_nss = param->peer_nss; 2070 2071 /* Update bandwidth-NSS mapping */ 2072 cmd->peer_bw_rxnss_override = 0; 2073 cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; 2074 2075 if (param->vht_capable) { 2076 /* firmware interprets mcs->tx_mcs_set field as peer's 2077 * RX capability 2078 */ 2079 mcs->tx_max_rate = param->rx_max_rate; 2080 mcs->tx_mcs_set = 
param->rx_mcs_set; 2081 mcs->rx_max_rate = param->tx_max_rate; 2082 mcs->rx_mcs_set = param->tx_mcs_set; 2083 } 2084 2085 /* HE Rates */ 2086 cmd->peer_he_mcs = param->peer_he_mcs_count; 2087 cmd->min_data_rate = param->min_data_rate; 2088 2089 ptr += sizeof(*mcs); 2090 2091 len = param->peer_he_mcs_count * sizeof(*he_mcs); 2092 2093 tlv = ptr; 2094 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 2095 FIELD_PREP(WMI_TLV_LEN, len); 2096 ptr += TLV_HDR_SIZE; 2097 2098 /* Loop through the HE rate set */ 2099 for (i = 0; i < param->peer_he_mcs_count; i++) { 2100 he_mcs = ptr; 2101 he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, 2102 WMI_TAG_HE_RATE_SET) | 2103 FIELD_PREP(WMI_TLV_LEN, 2104 sizeof(*he_mcs) - TLV_HDR_SIZE); 2105 2106 /* firmware interprets mcs->rx_mcs_set field as peer's 2107 * RX capability 2108 */ 2109 he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; 2110 he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; 2111 ptr += sizeof(*he_mcs); 2112 } 2113 2114 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); 2115 if (ret) { 2116 ath11k_warn(ar->ab, 2117 "failed to send WMI_PEER_ASSOC_CMDID\n"); 2118 dev_kfree_skb(skb); 2119 } 2120 2121 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 2122 "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", 2123 cmd->vdev_id, cmd->peer_associd, param->peer_mac, 2124 cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, 2125 cmd->peer_listen_intval, cmd->peer_ht_caps, 2126 cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, 2127 cmd->peer_mpdu_density, 2128 cmd->peer_vht_caps, cmd->peer_he_cap_info, 2129 cmd->peer_he_ops, cmd->peer_he_cap_info_ext, 2130 cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], 2131 cmd->peer_he_cap_phy[2], 2132 cmd->peer_bw_rxnss_override); 2133 2134 return ret; 2135 } 2136 2137 
void ath11k_wmi_start_scan_init(struct ath11k *ar, 2138 struct scan_req_params *arg) 2139 { 2140 /* setup commonly used values */ 2141 arg->scan_req_id = 1; 2142 if (ar->state_11d == ATH11K_11D_PREPARING) 2143 arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; 2144 else 2145 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2146 arg->dwell_time_active = 50; 2147 arg->dwell_time_active_2g = 0; 2148 arg->dwell_time_passive = 150; 2149 arg->dwell_time_active_6g = 40; 2150 arg->dwell_time_passive_6g = 30; 2151 arg->min_rest_time = 50; 2152 arg->max_rest_time = 500; 2153 arg->repeat_probe_time = 0; 2154 arg->probe_spacing_time = 0; 2155 arg->idle_time = 0; 2156 arg->max_scan_time = 20000; 2157 arg->probe_delay = 5; 2158 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | 2159 WMI_SCAN_EVENT_COMPLETED | 2160 WMI_SCAN_EVENT_BSS_CHANNEL | 2161 WMI_SCAN_EVENT_FOREIGN_CHAN | 2162 WMI_SCAN_EVENT_DEQUEUED; 2163 arg->scan_f_chan_stat_evnt = 1; 2164 2165 if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE, 2166 ar->ab->wmi_ab.svc_map)) 2167 arg->scan_ctrl_flags_ext |= 2168 WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE; 2169 2170 arg->num_bssid = 1; 2171 2172 /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be 2173 * ZEROs in probe request 2174 */ 2175 eth_broadcast_addr(arg->bssid_list[0].addr); 2176 } 2177 2178 static inline void 2179 ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, 2180 struct scan_req_params *param) 2181 { 2182 /* Scan events subscription */ 2183 if (param->scan_ev_started) 2184 cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; 2185 if (param->scan_ev_completed) 2186 cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; 2187 if (param->scan_ev_bss_chan) 2188 cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; 2189 if (param->scan_ev_foreign_chan) 2190 cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN; 2191 if (param->scan_ev_dequeued) 2192 cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; 2193 if 
(param->scan_ev_preempted) 2194 cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; 2195 if (param->scan_ev_start_failed) 2196 cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; 2197 if (param->scan_ev_restarted) 2198 cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; 2199 if (param->scan_ev_foreign_chn_exit) 2200 cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT; 2201 if (param->scan_ev_suspended) 2202 cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED; 2203 if (param->scan_ev_resumed) 2204 cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED; 2205 2206 /** Set scan control flags */ 2207 cmd->scan_ctrl_flags = 0; 2208 if (param->scan_f_passive) 2209 cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 2210 if (param->scan_f_strict_passive_pch) 2211 cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN; 2212 if (param->scan_f_promisc_mode) 2213 cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS; 2214 if (param->scan_f_capture_phy_err) 2215 cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR; 2216 if (param->scan_f_half_rate) 2217 cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT; 2218 if (param->scan_f_quarter_rate) 2219 cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT; 2220 if (param->scan_f_cck_rates) 2221 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; 2222 if (param->scan_f_ofdm_rates) 2223 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; 2224 if (param->scan_f_chan_stat_evnt) 2225 cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; 2226 if (param->scan_f_filter_prb_req) 2227 cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 2228 if (param->scan_f_bcast_probe) 2229 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ; 2230 if (param->scan_f_offchan_mgmt_tx) 2231 cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX; 2232 if (param->scan_f_offchan_data_tx) 2233 cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX; 2234 if (param->scan_f_force_active_dfs_chn) 2235 cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS; 2236 if 
(param->scan_f_add_tpc_ie_in_probe) 2237 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ; 2238 if (param->scan_f_add_ds_ie_in_probe) 2239 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; 2240 if (param->scan_f_add_spoofed_mac_in_probe) 2241 cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ; 2242 if (param->scan_f_add_rand_seq_in_probe) 2243 cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ; 2244 if (param->scan_f_en_ie_whitelist_in_probe) 2245 cmd->scan_ctrl_flags |= 2246 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ; 2247 2248 /* for adaptive scan mode using 3 bits (21 - 23 bits) */ 2249 WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, 2250 param->adaptive_dwell_time_mode); 2251 2252 cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext; 2253 } 2254 2255 int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, 2256 struct scan_req_params *params) 2257 { 2258 struct ath11k_pdev_wmi *wmi = ar->wmi; 2259 struct wmi_start_scan_cmd *cmd; 2260 struct wmi_ssid *ssid = NULL; 2261 struct wmi_mac_addr *bssid; 2262 struct sk_buff *skb; 2263 struct wmi_tlv *tlv; 2264 void *ptr; 2265 int i, ret, len; 2266 u32 *tmp_ptr; 2267 u16 extraie_len_with_pad = 0; 2268 struct hint_short_ssid *s_ssid = NULL; 2269 struct hint_bssid *hint_bssid = NULL; 2270 2271 len = sizeof(*cmd); 2272 2273 len += TLV_HDR_SIZE; 2274 if (params->num_chan) 2275 len += params->num_chan * sizeof(u32); 2276 2277 len += TLV_HDR_SIZE; 2278 if (params->num_ssids) 2279 len += params->num_ssids * sizeof(*ssid); 2280 2281 len += TLV_HDR_SIZE; 2282 if (params->num_bssid) 2283 len += sizeof(*bssid) * params->num_bssid; 2284 2285 len += TLV_HDR_SIZE; 2286 if (params->extraie.len && params->extraie.len <= 0xFFFF) 2287 extraie_len_with_pad = 2288 roundup(params->extraie.len, sizeof(u32)); 2289 len += extraie_len_with_pad; 2290 2291 if (params->num_hint_bssid) 2292 len += TLV_HDR_SIZE + 2293 params->num_hint_bssid * sizeof(struct hint_bssid); 2294 2295 if (params->num_hint_s_ssid) 2296 len += 
TLV_HDR_SIZE + 2297 params->num_hint_s_ssid * sizeof(struct hint_short_ssid); 2298 2299 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 2300 if (!skb) 2301 return -ENOMEM; 2302 2303 ptr = skb->data; 2304 2305 cmd = ptr; 2306 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) | 2307 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2308 2309 cmd->scan_id = params->scan_id; 2310 cmd->scan_req_id = params->scan_req_id; 2311 cmd->vdev_id = params->vdev_id; 2312 cmd->scan_priority = params->scan_priority; 2313 cmd->notify_scan_events = params->notify_scan_events; 2314 2315 ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params); 2316 2317 cmd->dwell_time_active = params->dwell_time_active; 2318 cmd->dwell_time_active_2g = params->dwell_time_active_2g; 2319 cmd->dwell_time_passive = params->dwell_time_passive; 2320 cmd->dwell_time_active_6g = params->dwell_time_active_6g; 2321 cmd->dwell_time_passive_6g = params->dwell_time_passive_6g; 2322 cmd->min_rest_time = params->min_rest_time; 2323 cmd->max_rest_time = params->max_rest_time; 2324 cmd->repeat_probe_time = params->repeat_probe_time; 2325 cmd->probe_spacing_time = params->probe_spacing_time; 2326 cmd->idle_time = params->idle_time; 2327 cmd->max_scan_time = params->max_scan_time; 2328 cmd->probe_delay = params->probe_delay; 2329 cmd->burst_duration = params->burst_duration; 2330 cmd->num_chan = params->num_chan; 2331 cmd->num_bssid = params->num_bssid; 2332 cmd->num_ssids = params->num_ssids; 2333 cmd->ie_len = params->extraie.len; 2334 cmd->n_probes = params->n_probes; 2335 ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr); 2336 ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr); 2337 2338 ptr += sizeof(*cmd); 2339 2340 len = params->num_chan * sizeof(u32); 2341 2342 tlv = ptr; 2343 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | 2344 FIELD_PREP(WMI_TLV_LEN, len); 2345 ptr += TLV_HDR_SIZE; 2346 tmp_ptr = ptr; 2347 2348 for (i = 0; i < params->num_chan; ++i) 2349 tmp_ptr[i] 
= params->chan_list[i]; 2350 2351 ptr += len; 2352 2353 len = params->num_ssids * sizeof(*ssid); 2354 tlv = ptr; 2355 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | 2356 FIELD_PREP(WMI_TLV_LEN, len); 2357 2358 ptr += TLV_HDR_SIZE; 2359 2360 if (params->num_ssids) { 2361 ssid = ptr; 2362 for (i = 0; i < params->num_ssids; ++i) { 2363 ssid->ssid_len = params->ssid[i].length; 2364 memcpy(ssid->ssid, params->ssid[i].ssid, 2365 params->ssid[i].length); 2366 ssid++; 2367 } 2368 } 2369 2370 ptr += (params->num_ssids * sizeof(*ssid)); 2371 len = params->num_bssid * sizeof(*bssid); 2372 tlv = ptr; 2373 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | 2374 FIELD_PREP(WMI_TLV_LEN, len); 2375 2376 ptr += TLV_HDR_SIZE; 2377 bssid = ptr; 2378 2379 if (params->num_bssid) { 2380 for (i = 0; i < params->num_bssid; ++i) { 2381 ether_addr_copy(bssid->addr, 2382 params->bssid_list[i].addr); 2383 bssid++; 2384 } 2385 } 2386 2387 ptr += params->num_bssid * sizeof(*bssid); 2388 2389 len = extraie_len_with_pad; 2390 tlv = ptr; 2391 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 2392 FIELD_PREP(WMI_TLV_LEN, len); 2393 ptr += TLV_HDR_SIZE; 2394 2395 if (extraie_len_with_pad) 2396 memcpy(ptr, params->extraie.ptr, 2397 params->extraie.len); 2398 2399 ptr += extraie_len_with_pad; 2400 2401 if (params->num_hint_s_ssid) { 2402 len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); 2403 tlv = ptr; 2404 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | 2405 FIELD_PREP(WMI_TLV_LEN, len); 2406 ptr += TLV_HDR_SIZE; 2407 s_ssid = ptr; 2408 for (i = 0; i < params->num_hint_s_ssid; ++i) { 2409 s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; 2410 s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; 2411 s_ssid++; 2412 } 2413 ptr += len; 2414 } 2415 2416 if (params->num_hint_bssid) { 2417 len = params->num_hint_bssid * sizeof(struct hint_bssid); 2418 tlv = ptr; 2419 tlv->header = FIELD_PREP(WMI_TLV_TAG, 
WMI_TAG_ARRAY_FIXED_STRUCT) | 2420 FIELD_PREP(WMI_TLV_LEN, len); 2421 ptr += TLV_HDR_SIZE; 2422 hint_bssid = ptr; 2423 for (i = 0; i < params->num_hint_bssid; ++i) { 2424 hint_bssid->freq_flags = 2425 params->hint_bssid[i].freq_flags; 2426 ether_addr_copy(¶ms->hint_bssid[i].bssid.addr[0], 2427 &hint_bssid->bssid.addr[0]); 2428 hint_bssid++; 2429 } 2430 } 2431 2432 ret = ath11k_wmi_cmd_send(wmi, skb, 2433 WMI_START_SCAN_CMDID); 2434 if (ret) { 2435 ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); 2436 dev_kfree_skb(skb); 2437 } 2438 2439 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan"); 2440 2441 return ret; 2442 } 2443 2444 int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, 2445 u32 vdev_id, 2446 struct ath11k_reg_tpc_power_info *param) 2447 { 2448 struct ath11k_pdev_wmi *wmi = ar->wmi; 2449 struct wmi_vdev_set_tpc_power_cmd *cmd; 2450 struct wmi_vdev_ch_power_info *ch; 2451 struct sk_buff *skb; 2452 struct wmi_tlv *tlv; 2453 u8 *ptr; 2454 int i, ret, len, array_len; 2455 2456 array_len = sizeof(*ch) * param->num_pwr_levels; 2457 len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; 2458 2459 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 2460 if (!skb) 2461 return -ENOMEM; 2462 2463 ptr = skb->data; 2464 2465 cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; 2466 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) | 2467 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2468 cmd->vdev_id = vdev_id; 2469 cmd->psd_power = param->is_psd_power; 2470 cmd->eirp_power = param->eirp_power; 2471 cmd->power_type_6ghz = param->ap_power_type; 2472 2473 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 2474 "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", 2475 vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); 2476 2477 ptr += sizeof(*cmd); 2478 tlv = (struct wmi_tlv *)ptr; 2479 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 2480 FIELD_PREP(WMI_TLV_LEN, array_len); 2481 2482 ptr += TLV_HDR_SIZE; 
	ch = (struct wmi_vdev_ch_power_info *)ptr;

	/* One TLV entry per configured power level */
	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					    WMI_TAG_VDEV_CH_POWER_INFO) |
				 FIELD_PREP(WMI_TLV_LEN,
					    sizeof(*ch) - TLV_HDR_SIZE);

		ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
		ch->tx_power = param->chan_power_info[i].tx_power;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   ch->chan_cfreq, ch->tx_power);
	}

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* ath11k_wmi_send_scan_stop_cmd - cancel an ongoing scan via
 * WMI_STOP_SCAN_CMDID. @param->req_type selects whether all scans on the
 * pdev, all scans on a vdev, or a single scan_id are cancelled.
 */
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
				  struct scan_cancel_param *param)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;
	cmd->requestor = param->requester;
	cmd->scan_id = param->scan_id;
	cmd->pdev_id = param->pdev_id;
	/* stop the scan with the corresponding scan_id */
	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = WMI_SCAN_STOP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = WMI_SCN_STOP_VAP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = WMI_SCAN_STOP_ONE;
	} else {
		ath11k_warn(ar->ab, "invalid scan cancel param %d",
			    param->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan");

	return ret;
}

/* ath11k_wmi_send_scan_chan_list_cmd - push the scan channel list to
 * firmware. The list may exceed the maximum WMI message size, so it is
 * sent in chunks; every chunk after the first carries
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so firmware appends rather than
 * replaces. Consumes chan_list->nallchans as it goes.
 */
int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
				       struct scan_chan_list_params *chan_list)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	u32 *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* cap this chunk by the transport's max message size */
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
			   "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		/* NOTE(review): TLV len is the array size minus TLV_HDR_SIZE
		 * here, unlike the other ARRAY_STRUCT TLVs in this file which
		 * use the full array size — looks suspicious; confirm against
		 * the firmware TLV definition before changing.
		 */
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
							   WMI_TAG_CHANNEL) |
						FIELD_PREP(WMI_TLV_LEN,
							   len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
						      tchan_info->phy_mode);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
					    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
					    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
					    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
					    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
					    tchan_info->antennamax);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
					    tchan_info->maxregpower);

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d",
			   num_send_chans);

		num_sends++;
	}

	return 0;
}

/* ath11k_wmi_send_wmm_update_cmd_tlv - program per-AC WMM parameters
 * (AIFS, CWmin/CWmax, TXOP, ACM, no-ack) for @vdev_id via
 * WMI_VDEV_SET_WMM_PARAMS_CMDID.
 */
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
				       struct wmi_wmm_params_all_arg *param,
				       enum wmi_wmm_params_type wmm_param_type)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->wmm_param_type = wmm_param_type;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		/* NOTE(review): per-AC entries reuse the
		 * WMI_TAG_VDEV_SET_WMM_PARAMS_CMD tag rather than a dedicated
		 * wmm-params tag — presumably intentional for this firmware
		 * interface; confirm before changing.
		 */
		wmm_param->tlv_header =
			FIELD_PREP(WMI_TLV_TAG,
				   WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
			FIELD_PREP(WMI_TLV_LEN,
				   sizeof(*wmm_param) - TLV_HDR_SIZE);

		wmm_param->aifs = wmi_wmm_arg->aifs;
		wmm_param->cwmin = wmi_wmm_arg->cwmin;
		wmm_param->cwmax = wmi_wmm_arg->cwmax;
		wmm_param->txoplimit = wmi_wmm_arg->txop;
		wmm_param->acm
			= wmi_wmm_arg->acm;
		wmm_param->no_ack = wmi_wmm_arg->no_ack;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
			   "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}
	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params");

	return ret;
}

/* ath11k_wmi_send_dfs_phyerr_offload_enable_cmd - ask firmware to do DFS
 * radar-pulse PHY error processing on @pdev_id instead of the host.
 */
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
						  u32 pdev_id)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG,
			   WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = pdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id);

	return ret;
}

/* ath11k_wmi_delba_send - tear down a block-ack session with peer @mac on
 * @tid (WMI_DELBA_SEND_CMDID). @initiator/@reason per 802.11 DELBA.
 */
int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->initiator = initiator;
	cmd->reasoncode = reason;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);

	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	return ret;
}

/* ath11k_wmi_addba_set_resp - tell firmware which status code to place in
 * the ADDBA response for @tid of peer @mac (WMI_ADDBA_SET_RESP_CMDID).
 */
int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->statuscode = status;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);

	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	return ret;
}

/* ath11k_wmi_addba_send - initiate an ADDBA request to peer @mac on @tid
 * with the given reorder buffer size (WMI_ADDBA_SEND_CMDID).
 */
int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->buffersize = buf_size;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	return ret;
}

/* ath11k_wmi_addba_clear_resp - clear any previously configured ADDBA
 * response state for peer @mac (WMI_ADDBA_CLEAR_RESP_CMDID).
 */
int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	return ret;
}

/* ath11k_wmi_pdev_peer_pktlog_filter - enable/disable packet-log capture
 * filtering for the peer at @addr (WMI_PDEV_PKTLOG_FILTER_CMDID).
 */
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_pktlog_filter_cmd *cmd;
struct wmi_pdev_pktlog_filter_info *info; 2933 struct sk_buff *skb; 2934 struct wmi_tlv *tlv; 2935 void *ptr; 2936 int ret, len; 2937 2938 len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE; 2939 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 2940 if (!skb) 2941 return -ENOMEM; 2942 2943 cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data; 2944 2945 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) | 2946 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2947 2948 cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); 2949 cmd->num_mac = 1; 2950 cmd->enable = enable; 2951 2952 ptr = skb->data + sizeof(*cmd); 2953 2954 tlv = ptr; 2955 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 2956 FIELD_PREP(WMI_TLV_LEN, sizeof(*info)); 2957 2958 ptr += TLV_HDR_SIZE; 2959 info = ptr; 2960 2961 ether_addr_copy(info->peer_macaddr.addr, addr); 2962 info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) | 2963 FIELD_PREP(WMI_TLV_LEN, 2964 sizeof(*info) - TLV_HDR_SIZE); 2965 2966 ret = ath11k_wmi_cmd_send(wmi, skb, 2967 WMI_PDEV_PKTLOG_FILTER_CMDID); 2968 if (ret) { 2969 ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); 2970 dev_kfree_skb(skb); 2971 } 2972 2973 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter"); 2974 2975 return ret; 2976 } 2977 2978 int 2979 ath11k_wmi_send_init_country_cmd(struct ath11k *ar, 2980 struct wmi_init_country_params init_cc_params) 2981 { 2982 struct ath11k_pdev_wmi *wmi = ar->wmi; 2983 struct wmi_init_country_cmd *cmd; 2984 struct sk_buff *skb; 2985 int ret; 2986 2987 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2988 if (!skb) 2989 return -ENOMEM; 2990 2991 cmd = (struct wmi_init_country_cmd *)skb->data; 2992 cmd->tlv_header = 2993 FIELD_PREP(WMI_TLV_TAG, 2994 WMI_TAG_SET_INIT_COUNTRY_CMD) | 2995 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2996 2997 cmd->pdev_id = ar->pdev->pdev_id; 2998 2999 switch (init_cc_params.flags) { 3000 case 
ALPHA_IS_SET: 3001 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA; 3002 memcpy((u8 *)&cmd->cc_info.alpha2, 3003 init_cc_params.cc_info.alpha2, 3); 3004 break; 3005 case CC_IS_SET: 3006 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE; 3007 cmd->cc_info.country_code = init_cc_params.cc_info.country_code; 3008 break; 3009 case REGDMN_IS_SET: 3010 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN; 3011 cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id; 3012 break; 3013 default: 3014 ath11k_warn(ar->ab, "unknown cc params flags: 0x%x", 3015 init_cc_params.flags); 3016 ret = -EINVAL; 3017 goto err; 3018 } 3019 3020 ret = ath11k_wmi_cmd_send(wmi, skb, 3021 WMI_SET_INIT_COUNTRY_CMDID); 3022 if (ret) { 3023 ath11k_warn(ar->ab, 3024 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", 3025 ret); 3026 goto err; 3027 } 3028 3029 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country"); 3030 3031 return 0; 3032 3033 err: 3034 dev_kfree_skb(skb); 3035 return ret; 3036 } 3037 3038 int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, 3039 struct wmi_set_current_country_params *param) 3040 { 3041 struct ath11k_pdev_wmi *wmi = ar->wmi; 3042 struct wmi_set_current_country_cmd *cmd; 3043 struct sk_buff *skb; 3044 int ret; 3045 3046 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3047 if (!skb) 3048 return -ENOMEM; 3049 3050 cmd = (struct wmi_set_current_country_cmd *)skb->data; 3051 cmd->tlv_header = 3052 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) | 3053 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3054 3055 cmd->pdev_id = ar->pdev->pdev_id; 3056 memcpy(&cmd->new_alpha2, ¶m->alpha2, 3); 3057 3058 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); 3059 if (ret) { 3060 ath11k_warn(ar->ab, 3061 "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); 3062 dev_kfree_skb(skb); 3063 } 3064 3065 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3066 "cmd set current country pdev id %d alpha2 %c%c\n", 3067 ar->pdev->pdev_id, 3068 
param->alpha2[0], 3069 param->alpha2[1]); 3070 3071 return ret; 3072 } 3073 3074 int 3075 ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, 3076 struct thermal_mitigation_params *param) 3077 { 3078 struct ath11k_pdev_wmi *wmi = ar->wmi; 3079 struct wmi_therm_throt_config_request_cmd *cmd; 3080 struct wmi_therm_throt_level_config_info *lvl_conf; 3081 struct wmi_tlv *tlv; 3082 struct sk_buff *skb; 3083 int i, ret, len; 3084 3085 len = sizeof(*cmd) + TLV_HDR_SIZE + 3086 THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info); 3087 3088 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3089 if (!skb) 3090 return -ENOMEM; 3091 3092 cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data; 3093 3094 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) | 3095 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3096 3097 cmd->pdev_id = ar->pdev->pdev_id; 3098 cmd->enable = param->enable; 3099 cmd->dc = param->dc; 3100 cmd->dc_per_event = param->dc_per_event; 3101 cmd->therm_throt_levels = THERMAL_LEVELS; 3102 3103 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 3104 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 3105 FIELD_PREP(WMI_TLV_LEN, 3106 (THERMAL_LEVELS * 3107 sizeof(struct wmi_therm_throt_level_config_info))); 3108 3109 lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data + 3110 sizeof(*cmd) + 3111 TLV_HDR_SIZE); 3112 for (i = 0; i < THERMAL_LEVELS; i++) { 3113 lvl_conf->tlv_header = 3114 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) | 3115 FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE); 3116 3117 lvl_conf->temp_lwm = param->levelconf[i].tmplwm; 3118 lvl_conf->temp_hwm = param->levelconf[i].tmphwm; 3119 lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent; 3120 lvl_conf->prio = param->levelconf[i].priority; 3121 lvl_conf++; 3122 } 3123 3124 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID); 3125 if (ret) { 3126 ath11k_warn(ar->ab, 
"failed to send THERM_THROT_SET_CONF cmd\n"); 3127 dev_kfree_skb(skb); 3128 } 3129 3130 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3131 "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n", 3132 ar->pdev->pdev_id, param->enable, param->dc, 3133 param->dc_per_event, THERMAL_LEVELS); 3134 3135 return ret; 3136 } 3137 3138 int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, 3139 struct wmi_11d_scan_start_params *param) 3140 { 3141 struct ath11k_pdev_wmi *wmi = ar->wmi; 3142 struct wmi_11d_scan_start_cmd *cmd; 3143 struct sk_buff *skb; 3144 int ret; 3145 3146 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3147 if (!skb) 3148 return -ENOMEM; 3149 3150 cmd = (struct wmi_11d_scan_start_cmd *)skb->data; 3151 cmd->tlv_header = 3152 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) | 3153 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3154 3155 cmd->vdev_id = param->vdev_id; 3156 cmd->scan_period_msec = param->scan_period_msec; 3157 cmd->start_interval_msec = param->start_interval_msec; 3158 3159 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); 3160 if (ret) { 3161 ath11k_warn(ar->ab, 3162 "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); 3163 dev_kfree_skb(skb); 3164 } 3165 3166 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3167 "cmd 11d scan start vdev id %d period %d ms internal %d ms\n", 3168 cmd->vdev_id, 3169 cmd->scan_period_msec, 3170 cmd->start_interval_msec); 3171 3172 return ret; 3173 } 3174 3175 int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id) 3176 { 3177 struct ath11k_pdev_wmi *wmi = ar->wmi; 3178 struct wmi_11d_scan_stop_cmd *cmd; 3179 struct sk_buff *skb; 3180 int ret; 3181 3182 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3183 if (!skb) 3184 return -ENOMEM; 3185 3186 cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; 3187 cmd->tlv_header = 3188 FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) | 3189 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3190 3191 cmd->vdev_id = 
vdev_id; 3192 3193 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); 3194 if (ret) { 3195 ath11k_warn(ar->ab, 3196 "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); 3197 dev_kfree_skb(skb); 3198 } 3199 3200 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3201 "cmd 11d scan stop vdev id %d\n", 3202 cmd->vdev_id); 3203 3204 return ret; 3205 } 3206 3207 int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) 3208 { 3209 struct ath11k_pdev_wmi *wmi = ar->wmi; 3210 struct wmi_pktlog_enable_cmd *cmd; 3211 struct sk_buff *skb; 3212 int ret; 3213 3214 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3215 if (!skb) 3216 return -ENOMEM; 3217 3218 cmd = (struct wmi_pktlog_enable_cmd *)skb->data; 3219 3220 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) | 3221 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3222 3223 cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); 3224 cmd->evlist = pktlog_filter; 3225 cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE; 3226 3227 ret = ath11k_wmi_cmd_send(wmi, skb, 3228 WMI_PDEV_PKTLOG_ENABLE_CMDID); 3229 if (ret) { 3230 ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); 3231 dev_kfree_skb(skb); 3232 } 3233 3234 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable"); 3235 3236 return ret; 3237 } 3238 3239 int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar) 3240 { 3241 struct ath11k_pdev_wmi *wmi = ar->wmi; 3242 struct wmi_pktlog_disable_cmd *cmd; 3243 struct sk_buff *skb; 3244 int ret; 3245 3246 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3247 if (!skb) 3248 return -ENOMEM; 3249 3250 cmd = (struct wmi_pktlog_disable_cmd *)skb->data; 3251 3252 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) | 3253 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3254 3255 cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); 3256 3257 ret = ath11k_wmi_cmd_send(wmi, skb, 3258 WMI_PDEV_PKTLOG_DISABLE_CMDID); 3259 if (ret) { 3260 
ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); 3261 dev_kfree_skb(skb); 3262 } 3263 3264 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable"); 3265 3266 return ret; 3267 } 3268 3269 void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params) 3270 { 3271 twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS; 3272 twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE; 3273 twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP; 3274 twt_params->congestion_thresh_teardown = 3275 ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN; 3276 twt_params->congestion_thresh_critical = 3277 ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL; 3278 twt_params->interference_thresh_teardown = 3279 ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN; 3280 twt_params->interference_thresh_setup = 3281 ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP; 3282 twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP; 3283 twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN; 3284 twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS; 3285 twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS; 3286 twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT; 3287 twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL; 3288 twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL; 3289 twt_params->remove_sta_slot_interval = 3290 ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL; 3291 /* TODO add MBSSID support */ 3292 twt_params->mbss_support = 0; 3293 } 3294 3295 int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id, 3296 struct wmi_twt_enable_params *params) 3297 { 3298 struct ath11k_pdev_wmi *wmi = ar->wmi; 3299 struct ath11k_base *ab = wmi->wmi_ab->ab; 3300 struct wmi_twt_enable_params_cmd *cmd; 3301 struct sk_buff *skb; 3302 int ret, len; 3303 3304 len = sizeof(*cmd); 3305 3306 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3307 if (!skb) 
3308 return -ENOMEM; 3309 3310 cmd = (struct wmi_twt_enable_params_cmd *)skb->data; 3311 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) | 3312 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3313 cmd->pdev_id = pdev_id; 3314 cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; 3315 cmd->default_slot_size = params->default_slot_size; 3316 cmd->congestion_thresh_setup = params->congestion_thresh_setup; 3317 cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; 3318 cmd->congestion_thresh_critical = params->congestion_thresh_critical; 3319 cmd->interference_thresh_teardown = params->interference_thresh_teardown; 3320 cmd->interference_thresh_setup = params->interference_thresh_setup; 3321 cmd->min_no_sta_setup = params->min_no_sta_setup; 3322 cmd->min_no_sta_teardown = params->min_no_sta_teardown; 3323 cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; 3324 cmd->min_no_twt_slots = params->min_no_twt_slots; 3325 cmd->max_no_sta_twt = params->max_no_sta_twt; 3326 cmd->mode_check_interval = params->mode_check_interval; 3327 cmd->add_sta_slot_interval = params->add_sta_slot_interval; 3328 cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; 3329 cmd->mbss_support = params->mbss_support; 3330 3331 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID); 3332 if (ret) { 3333 ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); 3334 dev_kfree_skb(skb); 3335 return ret; 3336 } 3337 3338 ar->twt_enabled = 1; 3339 3340 ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable"); 3341 3342 return 0; 3343 } 3344 3345 int 3346 ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id) 3347 { 3348 struct ath11k_pdev_wmi *wmi = ar->wmi; 3349 struct ath11k_base *ab = wmi->wmi_ab->ab; 3350 struct wmi_twt_disable_params_cmd *cmd; 3351 struct sk_buff *skb; 3352 int ret, len; 3353 3354 len = sizeof(*cmd); 3355 3356 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3357 if (!skb) 3358 return -ENOMEM; 3359 3360 cmd = (struct 
wmi_twt_disable_params_cmd *)skb->data; 3361 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) | 3362 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3363 cmd->pdev_id = pdev_id; 3364 3365 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID); 3366 if (ret) { 3367 ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); 3368 dev_kfree_skb(skb); 3369 return ret; 3370 } 3371 3372 ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable"); 3373 3374 ar->twt_enabled = 0; 3375 3376 return 0; 3377 } 3378 3379 int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar, 3380 struct wmi_twt_add_dialog_params *params) 3381 { 3382 struct ath11k_pdev_wmi *wmi = ar->wmi; 3383 struct ath11k_base *ab = wmi->wmi_ab->ab; 3384 struct wmi_twt_add_dialog_params_cmd *cmd; 3385 struct sk_buff *skb; 3386 int ret, len; 3387 3388 len = sizeof(*cmd); 3389 3390 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3391 if (!skb) 3392 return -ENOMEM; 3393 3394 cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data; 3395 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) | 3396 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3397 3398 cmd->vdev_id = params->vdev_id; 3399 ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); 3400 cmd->dialog_id = params->dialog_id; 3401 cmd->wake_intvl_us = params->wake_intvl_us; 3402 cmd->wake_intvl_mantis = params->wake_intvl_mantis; 3403 cmd->wake_dura_us = params->wake_dura_us; 3404 cmd->sp_offset_us = params->sp_offset_us; 3405 cmd->flags = params->twt_cmd; 3406 if (params->flag_bcast) 3407 cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST; 3408 if (params->flag_trigger) 3409 cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER; 3410 if (params->flag_flow_type) 3411 cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE; 3412 if (params->flag_protection) 3413 cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION; 3414 3415 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID); 3416 if (ret) { 3417 ath11k_warn(ab, 3418 "failed to send wmi command 
to add twt dialog: %d", 3419 ret); 3420 dev_kfree_skb(skb); 3421 return ret; 3422 } 3423 3424 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3425 "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n", 3426 cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us, 3427 cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us, 3428 cmd->flags); 3429 3430 return 0; 3431 } 3432 3433 int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar, 3434 struct wmi_twt_del_dialog_params *params) 3435 { 3436 struct ath11k_pdev_wmi *wmi = ar->wmi; 3437 struct ath11k_base *ab = wmi->wmi_ab->ab; 3438 struct wmi_twt_del_dialog_params_cmd *cmd; 3439 struct sk_buff *skb; 3440 int ret, len; 3441 3442 len = sizeof(*cmd); 3443 3444 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3445 if (!skb) 3446 return -ENOMEM; 3447 3448 cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data; 3449 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) | 3450 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3451 3452 cmd->vdev_id = params->vdev_id; 3453 ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); 3454 cmd->dialog_id = params->dialog_id; 3455 3456 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID); 3457 if (ret) { 3458 ath11k_warn(ab, 3459 "failed to send wmi command to delete twt dialog: %d", 3460 ret); 3461 dev_kfree_skb(skb); 3462 return ret; 3463 } 3464 3465 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3466 "cmd twt del dialog vdev %u dialog id %u\n", 3467 cmd->vdev_id, cmd->dialog_id); 3468 3469 return 0; 3470 } 3471 3472 int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar, 3473 struct wmi_twt_pause_dialog_params *params) 3474 { 3475 struct ath11k_pdev_wmi *wmi = ar->wmi; 3476 struct ath11k_base *ab = wmi->wmi_ab->ab; 3477 struct wmi_twt_pause_dialog_params_cmd *cmd; 3478 struct sk_buff *skb; 3479 int ret, len; 3480 3481 len = sizeof(*cmd); 3482 3483 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3484 if 
(!skb) 3485 return -ENOMEM; 3486 3487 cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data; 3488 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3489 WMI_TAG_TWT_PAUSE_DIALOG_CMD) | 3490 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3491 3492 cmd->vdev_id = params->vdev_id; 3493 ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); 3494 cmd->dialog_id = params->dialog_id; 3495 3496 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID); 3497 if (ret) { 3498 ath11k_warn(ab, 3499 "failed to send wmi command to pause twt dialog: %d", 3500 ret); 3501 dev_kfree_skb(skb); 3502 return ret; 3503 } 3504 3505 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3506 "cmd twt pause dialog vdev %u dialog id %u\n", 3507 cmd->vdev_id, cmd->dialog_id); 3508 3509 return 0; 3510 } 3511 3512 int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar, 3513 struct wmi_twt_resume_dialog_params *params) 3514 { 3515 struct ath11k_pdev_wmi *wmi = ar->wmi; 3516 struct ath11k_base *ab = wmi->wmi_ab->ab; 3517 struct wmi_twt_resume_dialog_params_cmd *cmd; 3518 struct sk_buff *skb; 3519 int ret, len; 3520 3521 len = sizeof(*cmd); 3522 3523 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3524 if (!skb) 3525 return -ENOMEM; 3526 3527 cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data; 3528 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3529 WMI_TAG_TWT_RESUME_DIALOG_CMD) | 3530 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3531 3532 cmd->vdev_id = params->vdev_id; 3533 ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); 3534 cmd->dialog_id = params->dialog_id; 3535 cmd->sp_offset_us = params->sp_offset_us; 3536 cmd->next_twt_size = params->next_twt_size; 3537 3538 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID); 3539 if (ret) { 3540 ath11k_warn(ab, 3541 "failed to send wmi command to resume twt dialog: %d", 3542 ret); 3543 dev_kfree_skb(skb); 3544 return ret; 3545 } 3546 3547 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3548 "cmd twt resume dialog vdev %u dialog id %u service 
period offset %u next twt subfield size %u\n", 3549 cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us, 3550 cmd->next_twt_size); 3551 3552 return 0; 3553 } 3554 3555 int 3556 ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, 3557 struct ieee80211_he_obss_pd *he_obss_pd) 3558 { 3559 struct ath11k_pdev_wmi *wmi = ar->wmi; 3560 struct ath11k_base *ab = wmi->wmi_ab->ab; 3561 struct wmi_obss_spatial_reuse_params_cmd *cmd; 3562 struct sk_buff *skb; 3563 int ret, len; 3564 3565 len = sizeof(*cmd); 3566 3567 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3568 if (!skb) 3569 return -ENOMEM; 3570 3571 cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; 3572 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3573 WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) | 3574 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3575 cmd->vdev_id = vdev_id; 3576 cmd->enable = he_obss_pd->enable; 3577 cmd->obss_min = he_obss_pd->min_offset; 3578 cmd->obss_max = he_obss_pd->max_offset; 3579 3580 ret = ath11k_wmi_cmd_send(wmi, skb, 3581 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); 3582 if (ret) { 3583 ath11k_warn(ab, 3584 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); 3585 dev_kfree_skb(skb); 3586 return ret; 3587 } 3588 3589 ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse"); 3590 3591 return 0; 3592 } 3593 3594 int 3595 ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap) 3596 { 3597 struct ath11k_pdev_wmi *wmi = ar->wmi; 3598 struct ath11k_base *ab = wmi->wmi_ab->ab; 3599 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3600 struct sk_buff *skb; 3601 int ret, len; 3602 3603 len = sizeof(*cmd); 3604 3605 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3606 if (!skb) 3607 return -ENOMEM; 3608 3609 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3610 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3611 WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) | 3612 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3613 cmd->pdev_id = ar->pdev->pdev_id; 3614 memcpy(cmd->bitmap, bitmap, 
sizeof(cmd->bitmap)); 3615 3616 ret = ath11k_wmi_cmd_send(wmi, skb, 3617 WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID); 3618 if (ret) { 3619 ath11k_warn(ab, 3620 "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID"); 3621 dev_kfree_skb(skb); 3622 return ret; 3623 } 3624 3625 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3626 "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n", 3627 cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3628 3629 return 0; 3630 } 3631 3632 int 3633 ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap) 3634 { 3635 struct ath11k_pdev_wmi *wmi = ar->wmi; 3636 struct ath11k_base *ab = wmi->wmi_ab->ab; 3637 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3638 struct sk_buff *skb; 3639 int ret, len; 3640 3641 len = sizeof(*cmd); 3642 3643 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3644 if (!skb) 3645 return -ENOMEM; 3646 3647 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3648 cmd->tlv_header = 3649 FIELD_PREP(WMI_TLV_TAG, 3650 WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) | 3651 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3652 cmd->pdev_id = ar->pdev->pdev_id; 3653 memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); 3654 3655 ret = ath11k_wmi_cmd_send(wmi, skb, 3656 WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID); 3657 if (ret) { 3658 ath11k_warn(ab, 3659 "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID"); 3660 dev_kfree_skb(skb); 3661 return ret; 3662 } 3663 3664 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3665 "cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n", 3666 cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3667 3668 return 0; 3669 } 3670 3671 int 3672 ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) 3673 { 3674 struct ath11k_pdev_wmi *wmi = ar->wmi; 3675 struct ath11k_base *ab = wmi->wmi_ab->ab; 3676 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3677 struct sk_buff *skb; 3678 int ret, len; 3679 3680 len = sizeof(*cmd); 3681 3682 skb = 
ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3683 if (!skb) 3684 return -ENOMEM; 3685 3686 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3687 cmd->tlv_header = 3688 FIELD_PREP(WMI_TLV_TAG, 3689 WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | 3690 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3691 cmd->pdev_id = ar->pdev->pdev_id; 3692 memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); 3693 3694 ret = ath11k_wmi_cmd_send(wmi, skb, 3695 WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); 3696 if (ret) { 3697 ath11k_warn(ab, 3698 "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); 3699 dev_kfree_skb(skb); 3700 return ret; 3701 } 3702 3703 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3704 "cmd pdev set srg obsscolor enable pdev_id %d bss color enable bitmap %08x %08x\n", 3705 cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3706 3707 return 0; 3708 } 3709 3710 int 3711 ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) 3712 { 3713 struct ath11k_pdev_wmi *wmi = ar->wmi; 3714 struct ath11k_base *ab = wmi->wmi_ab->ab; 3715 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3716 struct sk_buff *skb; 3717 int ret, len; 3718 3719 len = sizeof(*cmd); 3720 3721 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3722 if (!skb) 3723 return -ENOMEM; 3724 3725 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3726 cmd->tlv_header = 3727 FIELD_PREP(WMI_TLV_TAG, 3728 WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | 3729 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3730 cmd->pdev_id = ar->pdev->pdev_id; 3731 memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); 3732 3733 ret = ath11k_wmi_cmd_send(wmi, skb, 3734 WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); 3735 if (ret) { 3736 ath11k_warn(ab, 3737 "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); 3738 dev_kfree_skb(skb); 3739 return ret; 3740 } 3741 3742 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3743 "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", 3744 
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3745 3746 return 0; 3747 } 3748 3749 int 3750 ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) 3751 { 3752 struct ath11k_pdev_wmi *wmi = ar->wmi; 3753 struct ath11k_base *ab = wmi->wmi_ab->ab; 3754 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3755 struct sk_buff *skb; 3756 int ret, len; 3757 3758 len = sizeof(*cmd); 3759 3760 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3761 if (!skb) 3762 return -ENOMEM; 3763 3764 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3765 cmd->tlv_header = 3766 FIELD_PREP(WMI_TLV_TAG, 3767 WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | 3768 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3769 cmd->pdev_id = ar->pdev->pdev_id; 3770 memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); 3771 3772 ret = ath11k_wmi_cmd_send(wmi, skb, 3773 WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); 3774 if (ret) { 3775 ath11k_warn(ab, 3776 "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); 3777 dev_kfree_skb(skb); 3778 return ret; 3779 } 3780 3781 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3782 "cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n", 3783 cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3784 3785 return 0; 3786 } 3787 3788 int 3789 ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) 3790 { 3791 struct ath11k_pdev_wmi *wmi = ar->wmi; 3792 struct ath11k_base *ab = wmi->wmi_ab->ab; 3793 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3794 struct sk_buff *skb; 3795 int ret, len; 3796 3797 len = sizeof(*cmd); 3798 3799 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3800 if (!skb) 3801 return -ENOMEM; 3802 3803 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3804 cmd->tlv_header = 3805 FIELD_PREP(WMI_TLV_TAG, 3806 WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | 3807 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3808 cmd->pdev_id = ar->pdev->pdev_id; 3809 memcpy(cmd->bitmap, 
bitmap, sizeof(cmd->bitmap)); 3810 3811 ret = ath11k_wmi_cmd_send(wmi, skb, 3812 WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); 3813 if (ret) { 3814 ath11k_warn(ab, 3815 "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); 3816 dev_kfree_skb(skb); 3817 return ret; 3818 } 3819 3820 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3821 "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", 3822 cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); 3823 3824 return 0; 3825 } 3826 3827 int 3828 ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id, 3829 u8 bss_color, u32 period, 3830 bool enable) 3831 { 3832 struct ath11k_pdev_wmi *wmi = ar->wmi; 3833 struct ath11k_base *ab = wmi->wmi_ab->ab; 3834 struct wmi_obss_color_collision_cfg_params_cmd *cmd; 3835 struct sk_buff *skb; 3836 int ret, len; 3837 3838 len = sizeof(*cmd); 3839 3840 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3841 if (!skb) 3842 return -ENOMEM; 3843 3844 cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data; 3845 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3846 WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) | 3847 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3848 cmd->vdev_id = vdev_id; 3849 cmd->evt_type = enable ? 
ATH11K_OBSS_COLOR_COLLISION_DETECTION : 3850 ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE; 3851 cmd->current_bss_color = bss_color; 3852 cmd->detection_period_ms = period; 3853 cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS; 3854 cmd->free_slot_expiry_time_ms = 0; 3855 cmd->flags = 0; 3856 3857 ret = ath11k_wmi_cmd_send(wmi, skb, 3858 WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); 3859 if (ret) { 3860 ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); 3861 dev_kfree_skb(skb); 3862 return ret; 3863 } 3864 3865 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3866 "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n", 3867 cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, 3868 cmd->detection_period_ms, cmd->scan_period_ms); 3869 3870 return 0; 3871 } 3872 3873 int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, 3874 bool enable) 3875 { 3876 struct ath11k_pdev_wmi *wmi = ar->wmi; 3877 struct ath11k_base *ab = wmi->wmi_ab->ab; 3878 struct wmi_bss_color_change_enable_params_cmd *cmd; 3879 struct sk_buff *skb; 3880 int ret, len; 3881 3882 len = sizeof(*cmd); 3883 3884 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 3885 if (!skb) 3886 return -ENOMEM; 3887 3888 cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data; 3889 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) | 3890 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 3891 cmd->vdev_id = vdev_id; 3892 cmd->enable = enable ? 
1 : 0; 3893 3894 ret = ath11k_wmi_cmd_send(wmi, skb, 3895 WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); 3896 if (ret) { 3897 ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); 3898 dev_kfree_skb(skb); 3899 return ret; 3900 } 3901 3902 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3903 "cmd bss color change enable id %d enable %d\n", 3904 cmd->vdev_id, cmd->enable); 3905 3906 return 0; 3907 } 3908 3909 int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, 3910 struct sk_buff *tmpl) 3911 { 3912 struct wmi_tlv *tlv; 3913 struct sk_buff *skb; 3914 void *ptr; 3915 int ret, len; 3916 size_t aligned_len; 3917 struct wmi_fils_discovery_tmpl_cmd *cmd; 3918 3919 aligned_len = roundup(tmpl->len, 4); 3920 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; 3921 3922 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3923 "vdev %i set FILS discovery template\n", vdev_id); 3924 3925 skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3926 if (!skb) 3927 return -ENOMEM; 3928 3929 cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; 3930 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3931 WMI_TAG_FILS_DISCOVERY_TMPL_CMD) | 3932 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3933 cmd->vdev_id = vdev_id; 3934 cmd->buf_len = tmpl->len; 3935 ptr = skb->data + sizeof(*cmd); 3936 3937 tlv = ptr; 3938 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 3939 FIELD_PREP(WMI_TLV_LEN, aligned_len); 3940 memcpy(tlv->value, tmpl->data, tmpl->len); 3941 3942 ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID); 3943 if (ret) { 3944 ath11k_warn(ar->ab, 3945 "WMI vdev %i failed to send FILS discovery template command\n", 3946 vdev_id); 3947 dev_kfree_skb(skb); 3948 return ret; 3949 } 3950 3951 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl"); 3952 3953 return 0; 3954 } 3955 3956 int ath11k_wmi_peer_set_cfr_capture_conf(struct ath11k *ar, 3957 u32 vdev_id, const u8 *mac_addr, 3958 struct wmi_peer_cfr_capture_conf_arg *arg) 3959 { 3960 struct ath11k_pdev_wmi *wmi = 
ar->wmi; 3961 struct wmi_peer_cfr_capture_cmd_fixed_param *cmd; 3962 struct sk_buff *skb; 3963 int ret; 3964 3965 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3966 if (!skb) 3967 return -ENOMEM; 3968 3969 cmd = (struct wmi_peer_cfr_capture_cmd_fixed_param *)skb->data; 3970 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 3971 WMI_TAG_PEER_CFR_CAPTURE_CMD) | 3972 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 3973 3974 memcpy(&cmd->mac_addr, mac_addr, ETH_ALEN); 3975 cmd->request = arg->request; 3976 cmd->vdev_id = vdev_id; 3977 cmd->periodicity = arg->periodicity; 3978 cmd->bandwidth = arg->bw; 3979 cmd->capture_method = arg->method; 3980 3981 ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PEER_CFR_CAPTURE_CMDID); 3982 if (ret) { 3983 ath11k_warn(ar->ab, 3984 "WMI vdev %d failed to send peer cfr capture cmd: %d\n", 3985 vdev_id, ret); 3986 dev_kfree_skb(skb); 3987 } 3988 3989 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 3990 "WMI peer CFR capture cmd req %u id %u period %u bw %u mode %u\n", 3991 arg->request, vdev_id, arg->periodicity, 3992 arg->bw, arg->method); 3993 3994 return ret; 3995 } 3996 3997 int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, 3998 struct sk_buff *tmpl) 3999 { 4000 struct wmi_probe_tmpl_cmd *cmd; 4001 struct wmi_bcn_prb_info *probe_info; 4002 struct wmi_tlv *tlv; 4003 struct sk_buff *skb; 4004 void *ptr; 4005 int ret, len; 4006 size_t aligned_len = roundup(tmpl->len, 4); 4007 4008 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 4009 "vdev %i set probe response template\n", vdev_id); 4010 4011 len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; 4012 4013 skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 4014 if (!skb) 4015 return -ENOMEM; 4016 4017 cmd = (struct wmi_probe_tmpl_cmd *)skb->data; 4018 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) | 4019 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 4020 cmd->vdev_id = vdev_id; 4021 cmd->buf_len = tmpl->len; 4022 4023 ptr = skb->data + sizeof(*cmd); 4024 
4025 probe_info = ptr; 4026 len = sizeof(*probe_info); 4027 probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, 4028 WMI_TAG_BCN_PRB_INFO) | 4029 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 4030 probe_info->caps = 0; 4031 probe_info->erp = 0; 4032 4033 ptr += sizeof(*probe_info); 4034 4035 tlv = ptr; 4036 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | 4037 FIELD_PREP(WMI_TLV_LEN, aligned_len); 4038 memcpy(tlv->value, tmpl->data, tmpl->len); 4039 4040 ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); 4041 if (ret) { 4042 ath11k_warn(ar->ab, 4043 "WMI vdev %i failed to send probe response template command\n", 4044 vdev_id); 4045 dev_kfree_skb(skb); 4046 return ret; 4047 } 4048 4049 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd "); 4050 4051 return 0; 4052 } 4053 4054 int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, 4055 bool unsol_bcast_probe_resp_enabled) 4056 { 4057 struct sk_buff *skb; 4058 int ret, len; 4059 struct wmi_fils_discovery_cmd *cmd; 4060 4061 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 4062 "vdev %i set %s interval to %u TU\n", 4063 vdev_id, unsol_bcast_probe_resp_enabled ? 
4064 "unsolicited broadcast probe response" : "FILS discovery", 4065 interval); 4066 4067 len = sizeof(*cmd); 4068 skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 4069 if (!skb) 4070 return -ENOMEM; 4071 4072 cmd = (struct wmi_fils_discovery_cmd *)skb->data; 4073 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) | 4074 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 4075 cmd->vdev_id = vdev_id; 4076 cmd->interval = interval; 4077 cmd->config = unsol_bcast_probe_resp_enabled; 4078 4079 ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); 4080 if (ret) { 4081 ath11k_warn(ar->ab, 4082 "WMI vdev %i failed to send FILS discovery enable/disable command\n", 4083 vdev_id); 4084 dev_kfree_skb(skb); 4085 return ret; 4086 } 4087 4088 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils"); 4089 4090 return 0; 4091 } 4092 4093 static void 4094 ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb) 4095 { 4096 const void **tb; 4097 const struct wmi_obss_color_collision_event *ev; 4098 struct ath11k_vif *arvif; 4099 int ret; 4100 4101 tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4102 if (IS_ERR(tb)) { 4103 ret = PTR_ERR(tb); 4104 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); 4105 return; 4106 } 4107 4108 ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision"); 4109 4110 rcu_read_lock(); 4111 4112 ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; 4113 if (!ev) { 4114 ath11k_warn(ab, "failed to fetch obss color collision ev"); 4115 goto exit; 4116 } 4117 4118 arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); 4119 if (!arvif) { 4120 ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n", 4121 ev->vdev_id); 4122 goto exit; 4123 } 4124 4125 switch (ev->evt_type) { 4126 case WMI_BSS_COLOR_COLLISION_DETECTION: 4127 ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, 4128 0); 4129 ath11k_dbg(ab, ATH11K_DBG_WMI, 4130 "OBSS color collision detected vdev:%d, 
event:%d, bitmap:%08llx\n", 4131 ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); 4132 break; 4133 case WMI_BSS_COLOR_COLLISION_DISABLE: 4134 case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: 4135 case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: 4136 break; 4137 default: 4138 ath11k_warn(ab, "received unknown obss color collision detection event\n"); 4139 } 4140 4141 exit: 4142 kfree(tb); 4143 rcu_read_unlock(); 4144 } 4145 4146 static void 4147 ath11k_fill_band_to_mac_param(struct ath11k_base *soc, 4148 struct wmi_host_pdev_band_to_mac *band_to_mac) 4149 { 4150 u8 i; 4151 struct ath11k_hal_reg_capabilities_ext *hal_reg_cap; 4152 struct ath11k_pdev *pdev; 4153 4154 for (i = 0; i < soc->num_radios; i++) { 4155 pdev = &soc->pdevs[i]; 4156 hal_reg_cap = &soc->hal_reg_cap[i]; 4157 band_to_mac[i].pdev_id = pdev->pdev_id; 4158 4159 switch (pdev->cap.supported_bands) { 4160 case WMI_HOST_WLAN_2G_5G_CAP: 4161 band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; 4162 band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; 4163 break; 4164 case WMI_HOST_WLAN_2G_CAP: 4165 band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; 4166 band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan; 4167 break; 4168 case WMI_HOST_WLAN_5G_CAP: 4169 band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan; 4170 band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; 4171 break; 4172 default: 4173 break; 4174 } 4175 } 4176 } 4177 4178 static void 4179 ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, 4180 struct target_resource_config *tg_cfg) 4181 { 4182 wmi_cfg->num_vdevs = tg_cfg->num_vdevs; 4183 wmi_cfg->num_peers = tg_cfg->num_peers; 4184 wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers; 4185 wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs; 4186 wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys; 4187 wmi_cfg->num_tids = tg_cfg->num_tids; 4188 wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit; 4189 wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask; 4190 
wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask; 4191 wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0]; 4192 wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1]; 4193 wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2]; 4194 wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3]; 4195 wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode; 4196 wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req; 4197 wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev; 4198 wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev; 4199 wmi_cfg->roam_offload_max_ap_profiles = 4200 tg_cfg->roam_offload_max_ap_profiles; 4201 wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups; 4202 wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems; 4203 wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode; 4204 wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size; 4205 wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries; 4206 wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size; 4207 wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim; 4208 wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = 4209 tg_cfg->rx_skip_defrag_timeout_dup_detection_check; 4210 wmi_cfg->vow_config = tg_cfg->vow_config; 4211 wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev; 4212 wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc; 4213 wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries; 4214 wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs; 4215 wmi_cfg->num_tdls_conn_table_entries = 4216 tg_cfg->num_tdls_conn_table_entries; 4217 wmi_cfg->beacon_tx_offload_max_vdev = 4218 tg_cfg->beacon_tx_offload_max_vdev; 4219 wmi_cfg->num_multicast_filter_entries = 4220 tg_cfg->num_multicast_filter_entries; 4221 wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters; 4222 wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern; 4223 wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size; 4224 wmi_cfg->max_tdls_concurrent_sleep_sta = 4225 tg_cfg->max_tdls_concurrent_sleep_sta; 
4226 wmi_cfg->max_tdls_concurrent_buffer_sta = 4227 tg_cfg->max_tdls_concurrent_buffer_sta; 4228 wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate; 4229 wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs; 4230 wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels; 4231 wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules; 4232 wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size; 4233 wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters; 4234 wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id; 4235 wmi_cfg->flag1 = tg_cfg->flag1; 4236 wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support; 4237 wmi_cfg->sched_params = tg_cfg->sched_params; 4238 wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; 4239 wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count; 4240 wmi_cfg->host_service_flags &= 4241 ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); 4242 wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported << 4243 WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); 4244 wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET; 4245 wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt; 4246 wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period; 4247 } 4248 4249 static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, 4250 struct wmi_init_cmd_param *param) 4251 { 4252 struct ath11k_base *ab = wmi->wmi_ab->ab; 4253 struct sk_buff *skb; 4254 struct wmi_init_cmd *cmd; 4255 struct wmi_resource_config *cfg; 4256 struct wmi_pdev_set_hw_mode_cmd_param *hw_mode; 4257 struct wmi_pdev_band_to_mac *band_to_mac; 4258 struct wlan_host_mem_chunk *host_mem_chunks; 4259 struct wmi_tlv *tlv; 4260 size_t ret, len; 4261 void *ptr; 4262 u32 hw_mode_len = 0; 4263 u16 idx; 4264 4265 if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) 4266 hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE + 4267 (param->num_band_to_mac * sizeof(*band_to_mac)); 4268 4269 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len + 4270 (param->num_mem_chunks ? 
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); 4271 4272 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 4273 if (!skb) 4274 return -ENOMEM; 4275 4276 cmd = (struct wmi_init_cmd *)skb->data; 4277 4278 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) | 4279 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 4280 4281 ptr = skb->data + sizeof(*cmd); 4282 cfg = ptr; 4283 4284 ath11k_wmi_copy_resource_config(cfg, param->res_cfg); 4285 4286 cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) | 4287 FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE); 4288 4289 ptr += sizeof(*cfg); 4290 host_mem_chunks = ptr + TLV_HDR_SIZE; 4291 len = sizeof(struct wlan_host_mem_chunk); 4292 4293 for (idx = 0; idx < param->num_mem_chunks; ++idx) { 4294 host_mem_chunks[idx].tlv_header = 4295 FIELD_PREP(WMI_TLV_TAG, 4296 WMI_TAG_WLAN_HOST_MEMORY_CHUNK) | 4297 FIELD_PREP(WMI_TLV_LEN, len); 4298 4299 host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; 4300 host_mem_chunks[idx].size = param->mem_chunks[idx].len; 4301 host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; 4302 4303 ath11k_dbg(ab, ATH11K_DBG_WMI, 4304 "host mem chunk req_id %d paddr 0x%llx len %d\n", 4305 param->mem_chunks[idx].req_id, 4306 (u64)param->mem_chunks[idx].paddr, 4307 param->mem_chunks[idx].len); 4308 } 4309 cmd->num_host_mem_chunks = param->num_mem_chunks; 4310 len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks; 4311 4312 /* num_mem_chunks is zero */ 4313 tlv = ptr; 4314 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 4315 FIELD_PREP(WMI_TLV_LEN, len); 4316 ptr += TLV_HDR_SIZE + len; 4317 4318 if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { 4319 hw_mode = ptr; 4320 hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, 4321 WMI_TAG_PDEV_SET_HW_MODE_CMD) | 4322 FIELD_PREP(WMI_TLV_LEN, 4323 sizeof(*hw_mode) - TLV_HDR_SIZE); 4324 4325 hw_mode->hw_mode_index = param->hw_mode_id; 4326 hw_mode->num_band_to_mac = param->num_band_to_mac; 4327 4328 ptr += 
sizeof(*hw_mode); 4329 4330 len = param->num_band_to_mac * sizeof(*band_to_mac); 4331 tlv = ptr; 4332 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 4333 FIELD_PREP(WMI_TLV_LEN, len); 4334 4335 ptr += TLV_HDR_SIZE; 4336 len = sizeof(*band_to_mac); 4337 4338 for (idx = 0; idx < param->num_band_to_mac; idx++) { 4339 band_to_mac = ptr; 4340 4341 band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, 4342 WMI_TAG_PDEV_BAND_TO_MAC) | 4343 FIELD_PREP(WMI_TLV_LEN, 4344 len - TLV_HDR_SIZE); 4345 band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id; 4346 band_to_mac->start_freq = 4347 param->band_to_mac[idx].start_freq; 4348 band_to_mac->end_freq = 4349 param->band_to_mac[idx].end_freq; 4350 ptr += sizeof(*band_to_mac); 4351 } 4352 } 4353 4354 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); 4355 if (ret) { 4356 ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n"); 4357 dev_kfree_skb(skb); 4358 return ret; 4359 } 4360 4361 ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init"); 4362 4363 return 0; 4364 } 4365 4366 int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, 4367 int pdev_id) 4368 { 4369 struct ath11k_wmi_pdev_lro_config_cmd *cmd; 4370 struct sk_buff *skb; 4371 int ret; 4372 4373 skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4374 if (!skb) 4375 return -ENOMEM; 4376 4377 cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data; 4378 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) | 4379 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 4380 4381 get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE); 4382 get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE); 4383 4384 cmd->pdev_id = pdev_id; 4385 4386 ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID); 4387 if (ret) { 4388 ath11k_warn(ar->ab, 4389 "failed to send lro cfg req wmi cmd\n"); 4390 goto err; 4391 } 4392 4393 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 4394 "cmd lro config pdev_id 0x%x\n", pdev_id); 4395 return 0; 4396 err: 4397 
	dev_kfree_skb(skb);
	return ret;
}

/* Wait (up to WMI_SERVICE_READY_TIMEOUT_HZ) for the firmware's service
 * ready indication. Returns 0 or -ETIMEDOUT.
 */
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

/* Wait (same timeout) for the firmware's unified ready indication.
 * Returns 0 or -ETIMEDOUT.
 */
int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

/* Send WMI_PDEV_SET_HW_MODE_CMDID at SoC level to select the hw mode
 * (e.g. DBS/SBS) the target should operate in.
 */
int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
	struct sk_buff *skb;
	struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = mode;

	ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index);

	return 0;
}

/* Assemble the wmi_init_cmd_param (resource config from hw_ops, memory
 * chunks, preferred hw mode, band-to-mac map) and send WMI_INIT_CMDID.
 */
int ath11k_wmi_cmd_init(struct ath11k_base *ab)
{
	struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_init_cmd_param init_param;
	struct target_resource_config config;

	memset(&init_param, 0, sizeof(init_param));
	memset(&config, 0, sizeof(config));

	ab->hw_params.hw_ops->wmi_init_config(ab, &config);
	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		config.is_reg_cc_ext_event_supported = 1;

	/* keep a copy so later code can refer to what was sent to firmware */
	memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config));

	init_param.res_cfg = &wmi_ab->wlan_resource_config;
	init_param.num_mem_chunks = wmi_ab->num_mem_chunks;
	init_param.hw_mode_id = wmi_ab->preferred_hw_mode;
	init_param.mem_chunks = wmi_ab->mem_chunks;

	/* single-pdev targets skip the hw mode / band-to-mac TLVs */
	if (ab->hw_params.single_pdev_only)
		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	init_param.num_band_to_mac = ab->num_radios;
	ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);

	return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param);
}

/* Send the spectral scan configuration for a vdev. The parameter struct is
 * embedded verbatim in the command TLV.
 */
int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
				  struct ath11k_wmi_vdev_spectral_conf_param *param)
{
	struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	memcpy(&cmd->param, param, sizeof(*param));

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd vdev spectral scan configure vdev_id 0x%x\n",
		   param->vdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* Enable/disable (and optionally trigger) spectral scan on a vdev. */
int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb =
ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->trigger_cmd = trigger;
	cmd->enable_cmd = enable;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd vdev spectral scan enable vdev id 0x%x\n",
		   vdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* Configure a direct-buffer (DMA) ring in the target: base/head/tail index
 * physical addresses, element count/size and event moderation parameters
 * are copied from the caller-provided request structure.
 */
int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
				 struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = param->pdev_id;
	cmd->module_id = param->module_id;
	cmd->base_paddr_lo = param->base_paddr_lo;
	cmd->base_paddr_hi = param->base_paddr_hi;
	cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
	cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
	cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
	cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
	cmd->num_elems = param->num_elems;
	cmd->buf_size = param->buf_size;
	cmd->num_resp_per_event = param->num_resp_per_event;
	cmd->event_timeout_ms = param->event_timeout_ms;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "cmd pdev dma ring cfg req pdev_id 0x%x\n",
		   param->pdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* TLV iterator callback: count buffer release entries, bounding them by the
 * count announced in the fixed param. Returns -EPROTO on a foreign tag and
 * -ENOBUFS if the firmware sends more entries than announced.
 */
static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
		return -ENOBUFS;

	parse->num_buf_entry++;
	return 0;
}

/* TLV iterator callback: count spectral meta data entries, bounded by the
 * fixed param's num_meta_data_entry.
 */
static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (parse->num_meta >= parse->fixed.num_meta_data_entry)
		return -ENOBUFS;

	parse->num_meta++;
	return 0;
}

/* Top-level TLV parser for the DMA buffer release event: copies the fixed
 * param (translating the hw pdev id to the host id), then walks the two
 * WMI_TAG_ARRAY_STRUCT arrays in order (buffer entries first, then meta
 * data) using the sub-parsers above.
 */
static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		memcpy(&parse->fixed, ptr,
		       sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
		parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->buf_entry_done) {
			parse->num_buf_entry = 0;
			parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;

			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_dma_buf_entry_parse,
						  parse);
			if (ret) {
				ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
return ret; 4668 } 4669 4670 parse->buf_entry_done = true; 4671 } else if (!parse->meta_data_done) { 4672 parse->num_meta = 0; 4673 parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr; 4674 4675 ret = ath11k_wmi_tlv_iter(ab, ptr, len, 4676 ath11k_wmi_tlv_dma_buf_meta_parse, 4677 parse); 4678 if (ret) { 4679 ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n", 4680 ret); 4681 return ret; 4682 } 4683 4684 parse->meta_data_done = true; 4685 } 4686 break; 4687 default: 4688 break; 4689 } 4690 return 0; 4691 } 4692 4693 static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab, 4694 struct sk_buff *skb) 4695 { 4696 struct wmi_tlv_dma_buf_release_parse parse = { }; 4697 struct ath11k_dbring_buf_release_event param; 4698 int ret; 4699 4700 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 4701 ath11k_wmi_tlv_dma_buf_parse, 4702 &parse); 4703 if (ret) { 4704 ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret); 4705 return; 4706 } 4707 4708 ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release"); 4709 4710 param.fixed = parse.fixed; 4711 param.buf_entry = parse.buf_entry; 4712 param.num_buf_entry = parse.num_buf_entry; 4713 param.meta_data = parse.meta_data; 4714 param.num_meta = parse.num_meta; 4715 4716 ret = ath11k_dbring_buffer_release_event(ab, ¶m); 4717 if (ret) { 4718 ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret); 4719 return; 4720 } 4721 } 4722 4723 static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc, 4724 u16 tag, u16 len, 4725 const void *ptr, void *data) 4726 { 4727 struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; 4728 struct wmi_hw_mode_capabilities *hw_mode_cap; 4729 u32 phy_map = 0; 4730 4731 if (tag != WMI_TAG_HW_MODE_CAPABILITIES) 4732 return -EPROTO; 4733 4734 if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes) 4735 return -ENOBUFS; 4736 4737 hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities, 4738 hw_mode_id); 4739 
	svc_rdy_ext->n_hw_mode_caps++;

	/* count the phys belonging to this hw mode (one bit per phy) */
	phy_map = hw_mode_cap->phy_id_map;
	while (phy_map) {
		svc_rdy_ext->tot_phy_id++;
		phy_map = phy_map >> 1;
	}

	return 0;
}

/* Walk the hw mode capability array and pick the mode with the best
 * (lowest) priority per ath11k_hw_mode_pri_map as the preferred hw mode.
 * Fails with -EINVAL if no usable mode was found.
 */
static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct wmi_hw_mode_capabilities *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;

	ret = ath11k_wmi_tlv_iter(soc, ptr, len,
				  ath11k_wmi_tlv_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath11k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	i = 0;
	while (i < svc_rdy_ext->n_hw_mode_caps) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = hw_mode_caps->hw_mode_id;
		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
		i++;
	}

	ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
		   soc->wmi_ab.preferred_hw_mode);
	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

/* TLV iterator callback for mac/phy capability entries. On the first entry
 * it allocates the caps array sized tot_phy_id * min(TLV len, struct size);
 * subsequent entries are copied in order.
 * NOTE(review): the copy advances by sizeof(struct wmi_mac_phy_capabilities)
 * while the allocation uses the clamped first-entry len — assumes the
 * firmware reports a uniform entry size; verify against the WMI spec.
 */
static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	/* cope with firmware that sends a shorter (older) capability struct */
	len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
						    len,
GFP_ATOMIC); 4807 if (!svc_rdy_ext->mac_phy_caps) 4808 return -ENOMEM; 4809 } 4810 4811 memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len); 4812 svc_rdy_ext->n_mac_phy_caps++; 4813 return 0; 4814 } 4815 4816 static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc, 4817 u16 tag, u16 len, 4818 const void *ptr, void *data) 4819 { 4820 struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; 4821 4822 if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT) 4823 return -EPROTO; 4824 4825 if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy) 4826 return -ENOBUFS; 4827 4828 svc_rdy_ext->n_ext_hal_reg_caps++; 4829 return 0; 4830 } 4831 4832 static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc, 4833 u16 len, const void *ptr, void *data) 4834 { 4835 struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; 4836 struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; 4837 struct ath11k_hal_reg_capabilities_ext reg_cap; 4838 int ret; 4839 u32 i; 4840 4841 svc_rdy_ext->n_ext_hal_reg_caps = 0; 4842 svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr; 4843 ret = ath11k_wmi_tlv_iter(soc, ptr, len, 4844 ath11k_wmi_tlv_ext_hal_reg_caps_parse, 4845 svc_rdy_ext); 4846 if (ret) { 4847 ath11k_warn(soc, "failed to parse tlv %d\n", ret); 4848 return ret; 4849 } 4850 4851 for (i = 0; i < svc_rdy_ext->param.num_phy; i++) { 4852 ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle, 4853 svc_rdy_ext->soc_hal_reg_caps, 4854 svc_rdy_ext->ext_hal_reg_caps, i, 4855 ®_cap); 4856 if (ret) { 4857 ath11k_warn(soc, "failed to extract reg cap %d\n", i); 4858 return ret; 4859 } 4860 4861 memcpy(&soc->hal_reg_cap[reg_cap.phy_id], 4862 ®_cap, sizeof(reg_cap)); 4863 } 4864 return 0; 4865 } 4866 4867 static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, 4868 u16 len, const void *ptr, 4869 void *data) 4870 { 4871 struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; 4872 struct wmi_tlv_svc_rdy_ext_parse 
						     *svc_rdy_ext = data;
	u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	soc->num_radios = 0;
	soc->target_pdev_count = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	/* one radio per set bit in the preferred mode's phy map */
	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext->hw_caps,
							    svc_rdy_ext->hw_mode_caps,
							    svc_rdy_ext->soc_hal_reg_caps,
							    svc_rdy_ext->mac_phy_caps,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (soc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (soc->hw_params.single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	/* allocate once; service-ready can be re-delivered on restart */
	if (!soc->reg_info_store) {
		soc->reg_info_store = kzalloc_objs(*soc->reg_info_store,
						   soc->num_radios, GFP_ATOMIC);
		if (!soc->reg_info_store)
			return -ENOMEM;
	}

	return 0;
}

/* TLV iterator callback: count DMA ring capability entries. */
static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

/* Allocate ab->db_caps for num_cap direct-buffer ring capability records.
 * GFP_ATOMIC because this runs in WMI event context.
 */
static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath11k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

/* Release ab->db_caps and reset the bookkeeping. */
static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}

/* Parse the DMA ring capability array from a service-ready event and
 * translate it into ab->db_caps entries. A second delivery (e.g. via the
 * ext2 event) is ignored once num_db_cap is set.
 */
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
					u16 len, const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
	struct wmi_dma_ring_capabilities *dma_caps;
	struct ath11k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
	ret = ath11k_wmi_tlv_iter(ab, ptr, len,
				  ath11k_wmi_tlv_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}
	ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
			ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = dma_caps[i].module_id;
		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
	}

	return 0;

free_dir_buff:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}

/* Top-level TLV parser for WMI_SERVICE_READY_EXT_EVENTID. The anonymous
 * WMI_TAG_ARRAY_STRUCT arrays arrive in a fixed order, tracked by the
 * *_done flags: hw mode caps, mac/phy caps, ext HAL reg caps, two chainmask
 * arrays and an OEM DMA ring array (both skipped), then DMA ring caps.
 */
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->param);
		if (ret) {
			ath11k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
								svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
							  svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
5059 svc_rdy_ext->n_mac_phy_caps = 0; 5060 ret = ath11k_wmi_tlv_iter(ab, ptr, len, 5061 ath11k_wmi_tlv_mac_phy_caps_parse, 5062 svc_rdy_ext); 5063 if (ret) { 5064 ath11k_warn(ab, "failed to parse tlv %d\n", ret); 5065 return ret; 5066 } 5067 5068 svc_rdy_ext->mac_phy_done = true; 5069 } else if (!svc_rdy_ext->ext_hal_reg_done) { 5070 ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr, 5071 svc_rdy_ext); 5072 if (ret) 5073 return ret; 5074 5075 svc_rdy_ext->ext_hal_reg_done = true; 5076 } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { 5077 svc_rdy_ext->mac_phy_chainmask_combo_done = true; 5078 } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { 5079 svc_rdy_ext->mac_phy_chainmask_cap_done = true; 5080 } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { 5081 svc_rdy_ext->oem_dma_ring_cap_done = true; 5082 } else if (!svc_rdy_ext->dma_ring_cap_done) { 5083 ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, 5084 &svc_rdy_ext->dma_caps_parse); 5085 if (ret) 5086 return ret; 5087 5088 svc_rdy_ext->dma_ring_cap_done = true; 5089 } 5090 break; 5091 5092 default: 5093 break; 5094 } 5095 return 0; 5096 } 5097 5098 static int ath11k_service_ready_ext_event(struct ath11k_base *ab, 5099 struct sk_buff *skb) 5100 { 5101 struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { }; 5102 int ret; 5103 5104 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 5105 ath11k_wmi_tlv_svc_rdy_ext_parse, 5106 &svc_rdy_ext); 5107 if (ret) { 5108 ath11k_warn(ab, "failed to parse tlv %d\n", ret); 5109 goto err; 5110 } 5111 5112 ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext"); 5113 5114 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) 5115 complete(&ab->wmi_ab.service_ready); 5116 5117 kfree(svc_rdy_ext.mac_phy_caps); 5118 return 0; 5119 5120 err: 5121 ath11k_wmi_free_dbring_caps(ab); 5122 return ret; 5123 } 5124 5125 static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab, 5126 u16 tag, u16 len, 5127 const void *ptr, void *data) 5128 { 5129 struct 
wmi_tlv_svc_rdy_ext2_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->dma_ring_cap_done) {
			ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
							   &parse->dma_caps_parse);
			if (ret)
				return ret;

			parse->dma_ring_cap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

/* Handle WMI_SERVICE_READY_EXT2_EVENTID: parse the remaining capability
 * arrays and complete service_ready (this is the last service-ready
 * message when EXT2 is supported).
 */
static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
					   struct sk_buff *skb)
{
	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_svc_rdy_ext2_parse,
				  &svc_rdy_ext2);
	if (ret) {
		ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
		goto err;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2");

	complete(&ab->wmi_ab.service_ready);

	return 0;

err:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}

/* Extract the vdev start response event fields from the TLV payload into
 * *vdev_rsp. Returns 0, a tlv-parse error, or -EPROTO if the expected tag
 * is missing.
 */
static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
					   struct wmi_vdev_start_resp_event *vdev_rsp)
{
	const void **tb;
	const struct wmi_vdev_start_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev start resp ev");
		kfree(tb);
		return -EPROTO;
	}

	memset(vdev_rsp, 0, sizeof(*vdev_rsp));

	vdev_rsp->vdev_id = ev->vdev_id;
	vdev_rsp->requestor_id = ev->requestor_id;
	vdev_rsp->resp_type = ev->resp_type;
	vdev_rsp->status = ev->status;
	vdev_rsp->chain_mask = ev->chain_mask;
	vdev_rsp->smps_mode = ev->smps_mode;
	vdev_rsp->mac_id = ev->mac_id;
	vdev_rsp->cfgd_tx_streams =
		ev->cfgd_tx_streams;
	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
	vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;

	kfree(tb);
	return 0;
}

/* Debug-print every regulatory rule in a band's rule list. */
static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band,
				  u32 num_reg_rules,
				  struct cur_reg_rule *reg_rule_ptr)
{
	struct cur_reg_rule *reg_rule = reg_rule_ptr;
	u32 count;

	ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n",
		   band, num_reg_rules);

	for (count = 0; count < num_reg_rules; count++) {
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
			   count + 1, reg_rule->start_freq, reg_rule->end_freq,
			   reg_rule->max_bw, reg_rule->ant_gain,
			   reg_rule->reg_power, reg_rule->flags);
		reg_rule++;
	}
}

/* Convert an array of legacy WMI regulatory rules into a freshly allocated
 * (GFP_ATOMIC) array of cur_reg_rule, decoding each packed bitfield.
 * Returns NULL on allocation failure; the caller owns the returned memory.
 */
static struct cur_reg_rule
*create_reg_rules_from_wmi(u32 num_reg_rules,
			   struct wmi_regulatory_rule_struct *wmi_reg_rule)
{
	struct cur_reg_rule *reg_rule_ptr;
	u32 count;

	reg_rule_ptr = kzalloc_objs(*reg_rule_ptr, num_reg_rules, GFP_ATOMIC);

	if (!reg_rule_ptr)
		return NULL;

	for (count = 0; count < num_reg_rules; count++) {
		reg_rule_ptr[count].start_freq =
			FIELD_GET(REG_RULE_START_FREQ,
				  wmi_reg_rule[count].freq_info);
		reg_rule_ptr[count].end_freq =
			FIELD_GET(REG_RULE_END_FREQ,
				  wmi_reg_rule[count].freq_info);
		reg_rule_ptr[count].max_bw =
			FIELD_GET(REG_RULE_MAX_BW,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].reg_power =
			FIELD_GET(REG_RULE_REG_PWR,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].ant_gain =
			FIELD_GET(REG_RULE_ANT_GAIN,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].flags =
			FIELD_GET(REG_RULE_FLAGS,
				  wmi_reg_rule[count].flag_info);
	}

	return reg_rule_ptr;
}

static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base
*ab,
					       struct sk_buff *skb,
					       struct cur_regulatory_info *reg_info)
{
	const void **tb;
	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
	struct wmi_regulatory_rule_struct *wmi_reg_rule;
	u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
	int ret;

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");

	/* Parse the TLV stream into a tag-indexed pointer table.
	 * GFP_ATOMIC: this runs in the WMI event path.
	 */
	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
	if (!chan_list_event_hdr) {
		ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;

	/* An event carrying no rules at all is malformed */
	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
		ath11k_warn(ab, "No regulatory rules available in the event info\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
	       REG_ALPHA2_LEN);
	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
	reg_info->num_phy = chan_list_event_hdr->num_phy;
	reg_info->phy_id = chan_list_event_hdr->phy_id;
	reg_info->ctry_code = chan_list_event_hdr->country_id;
	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;

	/* NOTE(review): this logs reg_info->status_code *before* it is
	 * assigned just below, i.e. it prints the previous/stale value -
	 * confirm the intended ordering.
	 */
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "status_code %s",
		   ath11k_cc_status_to_str(reg_info->status_code));

	reg_info->status_code =
		ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);

	reg_info->is_ext_reg_event = false;

	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;

	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
		   reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
		   reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
		   num_2ghz_reg_rules, num_5ghz_reg_rules);

	/* Reg rules immediately follow the fixed event header plus its
	 * array-TLV header in the event buffer.
	 */
	wmi_reg_rule =
		(struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
						      + sizeof(*chan_list_event_hdr)
						      + sizeof(struct wmi_tlv));

	if (num_2ghz_reg_rules) {
		reg_info->reg_rules_2ghz_ptr =
			create_reg_rules_from_wmi(num_2ghz_reg_rules,
						  wmi_reg_rule);
		if (!reg_info->reg_rules_2ghz_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
			return -ENOMEM;
		}

		ath11k_print_reg_rule(ab, "2 GHz",
				      num_2ghz_reg_rules,
				      reg_info->reg_rules_2ghz_ptr);
	}

	if (num_5ghz_reg_rules) {
		/* 5 GHz rules start right after the 2 GHz ones */
		wmi_reg_rule += num_2ghz_reg_rules;
		reg_info->reg_rules_5ghz_ptr =
			create_reg_rules_from_wmi(num_5ghz_reg_rules,
						  wmi_reg_rule);
		if (!reg_info->reg_rules_5ghz_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
			return -ENOMEM;
		}

		ath11k_print_reg_rule(ab, "5 GHz",
				      num_5ghz_reg_rules,
				      reg_info->reg_rules_5ghz_ptr);
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");

	kfree(tb);
	return 0;
}

/* Convert an array of packed WMI extended reg rules into an array of
 * driver cur_reg_rule entries. Returns an atomically allocated array
 * (caller owns/frees it) or NULL on allocation failure.
 */
static struct cur_reg_rule
*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
			       struct wmi_regulatory_ext_rule *wmi_reg_rule)
{
	struct cur_reg_rule *reg_rule_ptr;
	u32 count;

	reg_rule_ptr =
kzalloc_objs(*reg_rule_ptr, num_reg_rules, GFP_ATOMIC); 5390 5391 if (!reg_rule_ptr) 5392 return NULL; 5393 5394 for (count = 0; count < num_reg_rules; count++) { 5395 reg_rule_ptr[count].start_freq = 5396 u32_get_bits(wmi_reg_rule[count].freq_info, 5397 REG_RULE_START_FREQ); 5398 reg_rule_ptr[count].end_freq = 5399 u32_get_bits(wmi_reg_rule[count].freq_info, 5400 REG_RULE_END_FREQ); 5401 reg_rule_ptr[count].max_bw = 5402 u32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5403 REG_RULE_MAX_BW); 5404 reg_rule_ptr[count].reg_power = 5405 u32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5406 REG_RULE_REG_PWR); 5407 reg_rule_ptr[count].ant_gain = 5408 u32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5409 REG_RULE_ANT_GAIN); 5410 reg_rule_ptr[count].flags = 5411 u32_get_bits(wmi_reg_rule[count].flag_info, 5412 REG_RULE_FLAGS); 5413 reg_rule_ptr[count].psd_flag = 5414 u32_get_bits(wmi_reg_rule[count].psd_power_info, 5415 REG_RULE_PSD_INFO); 5416 reg_rule_ptr[count].psd_eirp = 5417 u32_get_bits(wmi_reg_rule[count].psd_power_info, 5418 REG_RULE_PSD_EIRP); 5419 } 5420 5421 return reg_rule_ptr; 5422 } 5423 5424 static u8 5425 ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules, 5426 const struct wmi_regulatory_ext_rule *rule) 5427 { 5428 u8 num_invalid_5ghz_rules = 0; 5429 u32 count, start_freq; 5430 5431 for (count = 0; count < num_reg_rules; count++) { 5432 start_freq = u32_get_bits(rule[count].freq_info, 5433 REG_RULE_START_FREQ); 5434 5435 if (start_freq >= ATH11K_MIN_6G_FREQ) 5436 num_invalid_5ghz_rules++; 5437 } 5438 5439 return num_invalid_5ghz_rules; 5440 } 5441 5442 static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, 5443 struct sk_buff *skb, 5444 struct cur_regulatory_info *reg_info) 5445 { 5446 const void **tb; 5447 const struct wmi_reg_chan_list_cc_ext_event *ev; 5448 struct wmi_regulatory_ext_rule *ext_wmi_reg_rule; 5449 u32 num_2ghz_reg_rules, num_5ghz_reg_rules; 5450 u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 5451 u32 
num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
	u32 total_reg_rules = 0;
	int ret, i, j, num_invalid_5ghz_ext_rules = 0;

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");

	/* Parse the TLV stream (GFP_ATOMIC: WMI event path) */
	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	/* Rule counts: per band, per 6 GHz AP power mode (LPI/SP/VLP),
	 * and per 6 GHz client type under each AP power mode.
	 */
	reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules;
	reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules;
	reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] =
		ev->num_6ghz_reg_rules_ap_lpi;
	reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] =
		ev->num_6ghz_reg_rules_ap_sp;
	reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] =
		ev->num_6ghz_reg_rules_ap_vlp;

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] =
			ev->num_6ghz_reg_rules_client_lpi[i];
		reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] =
			ev->num_6ghz_reg_rules_client_sp[i];
		reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] =
			ev->num_6ghz_reg_rules_client_vlp[i];
	}

	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;

	total_reg_rules += num_2ghz_reg_rules;
	total_reg_rules += num_5ghz_reg_rules;

	/* Bound firmware-provided counts before any allocation/iteration */
	if ((num_2ghz_reg_rules > MAX_REG_RULES) ||
	    (num_5ghz_reg_rules > MAX_REG_RULES)) {
		ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n",
			    num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES);
		kfree(tb);
		return -EINVAL;
	}

	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i];

		if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
			ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n",
				    i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
			kfree(tb);
			return -EINVAL;
		}

		total_reg_rules += num_6ghz_reg_rules_ap[i];
	}

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		num_6ghz_client[WMI_REG_INDOOR_AP][i] =
			reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i];
		total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i];

		num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
			reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i];
		total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i];

		num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
			reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i];
		total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i];

		if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) ||
		    (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] >
		     MAX_6GHZ_REG_RULES) ||
		    (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] >
		     MAX_6GHZ_REG_RULES)) {
			ath11k_warn(ab,
				    "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n",
				    i);
			kfree(tb);
			return -EINVAL;
		}
	}

	if (!total_reg_rules) {
		ath11k_warn(ab, "No reg rules available\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);

	reg_info->dfs_region = ev->dfs_region;
	reg_info->phybitmap = ev->phybitmap;
	reg_info->num_phy = ev->num_phy;
	reg_info->phy_id = ev->phy_id;
	reg_info->ctry_code = ev->country_id;
	reg_info->reg_dmn_pair = ev->domain_code;

	/* NOTE(review): logs reg_info->status_code before the assignment
	 * below overwrites it (same pattern as the non-ext parser) -
	 * confirm the intended ordering.
	 */
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "status_code %s",
		   ath11k_cc_status_to_str(reg_info->status_code));

	reg_info->status_code =
		ath11k_wmi_cc_setting_code_to_reg(ev->status_code);

	reg_info->is_ext_reg_event = true;

	reg_info->min_bw_2ghz = ev->min_bw_2ghz;
	reg_info->max_bw_2ghz = ev->max_bw_2ghz;
	reg_info->min_bw_5ghz = ev->min_bw_5ghz;
	reg_info->max_bw_5ghz = ev->max_bw_5ghz;

	/* Per-power-mode 6 GHz AP bandwidth limits */
	reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
		ev->min_bw_6ghz_ap_lpi;
	reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
		ev->max_bw_6ghz_ap_lpi;
	reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
		ev->min_bw_6ghz_ap_sp;
	reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
		ev->max_bw_6ghz_ap_sp;
	reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
		ev->min_bw_6ghz_ap_vlp;
	reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
		ev->max_bw_6ghz_ap_vlp;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
		   reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP],
		   reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP],
		   reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
		   reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
		   reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP],
		   reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]);

	/* Per-client-type 6 GHz bandwidth limits, under each AP mode */
	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
			ev->min_bw_6ghz_client_lpi[i];
		reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
			ev->max_bw_6ghz_client_lpi[i];
		reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
			ev->min_bw_6ghz_client_sp[i];
		reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
			ev->max_bw_6ghz_client_sp[i];
		reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
			ev->min_bw_6ghz_client_vlp[i];
		reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
			ev->max_bw_6ghz_client_vlp[i];

		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
			   ath11k_6ghz_client_type_to_str(i),
			   reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
			   reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
			   reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
			   reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
			   reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i],
			   reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]);
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x",
		   reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
		   reg_info->min_bw_5ghz, reg_info->max_bw_5ghz,
		   reg_info->phybitmap);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
		   num_2ghz_reg_rules, num_5ghz_reg_rules);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d",
		   num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP],
		   num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP],
		   num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]);

	j = WMI_REG_DEFAULT_CLIENT;
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
		   num_6ghz_client[WMI_REG_INDOOR_AP][j],
		   num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
		   num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);

	j = WMI_REG_SUBORDINATE_CLIENT;
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
		   num_6ghz_client[WMI_REG_INDOOR_AP][j],
		   num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
		   num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);

	/* Rule arrays follow the fixed event header plus its TLV header;
	 * they are laid out 2 GHz, then 5 GHz, then 6 GHz AP per mode,
	 * then 6 GHz client per mode/type - walked in that order below.
	 */
	ext_wmi_reg_rule =
		(struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) +
						   sizeof(struct wmi_tlv));
	if (num_2ghz_reg_rules) {
		reg_info->reg_rules_2ghz_ptr =
			create_ext_reg_rules_from_wmi(num_2ghz_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_2ghz_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
			return -ENOMEM;
		}

		ath11k_print_reg_rule(ab, "2 GHz",
				      num_2ghz_reg_rules,
				      reg_info->reg_rules_2ghz_ptr);
	}

	ext_wmi_reg_rule += num_2ghz_reg_rules;

	/* Firmware might include 6 GHz reg rule in 5 GHz rule list
	 * for few countries along with separate 6 GHz rule.
	 * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list
	 * causes intersect check to be true, and same rules will be
	 * shown multiple times in iw cmd.
	 * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list
	 */
	num_invalid_5ghz_ext_rules =
		ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules,
							   ext_wmi_reg_rule);

	if (num_invalid_5ghz_ext_rules) {
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
			   reg_info->alpha2, reg_info->num_5ghz_reg_rules,
			   num_invalid_5ghz_ext_rules);

		num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules;
		reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules;
	}

	if (num_5ghz_reg_rules) {
		reg_info->reg_rules_5ghz_ptr =
			create_ext_reg_rules_from_wmi(num_5ghz_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_5ghz_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
			return -ENOMEM;
		}

		ath11k_print_reg_rule(ab, "5 GHz",
				      num_5ghz_reg_rules,
				      reg_info->reg_rules_5ghz_ptr);
	}

	/* We have adjusted the number of 5 GHz reg rules above. But still those
	 * many rules needs to be adjusted in ext_wmi_reg_rule.
	 *
	 * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases.
	 */
	ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules);

	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		reg_info->reg_rules_6ghz_ap_ptr[i] =
			create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i],
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_6ghz_ap_ptr[i]) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n");
			return -ENOMEM;
		}

		ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i),
				      num_6ghz_reg_rules_ap[i],
				      reg_info->reg_rules_6ghz_ap_ptr[i]);

		ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i];
	}

	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j));

		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
			reg_info->reg_rules_6ghz_client_ptr[j][i] =
				create_ext_reg_rules_from_wmi(num_6ghz_client[j][i],
							      ext_wmi_reg_rule);

			if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) {
				kfree(tb);
				ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n");
				return -ENOMEM;
			}

			ath11k_print_reg_rule(ab,
					      ath11k_6ghz_client_type_to_str(i),
					      num_6ghz_client[j][i],
					      reg_info->reg_rules_6ghz_client_ptr[j][i]);

			ext_wmi_reg_rule += num_6ghz_client[j][i];
		}
	}

	reg_info->client_type = ev->client_type;
	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
	reg_info->unspecified_ap_usable =
		ev->unspecified_ap_usable;
	reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] =
		ev->domain_code_6ghz_ap_lpi;
	reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
		ev->domain_code_6ghz_ap_sp;
	reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
		ev->domain_code_6ghz_ap_vlp;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n",
		   ath11k_6ghz_client_type_to_str(reg_info->client_type),
		   reg_info->rnr_tpe_usable,
		   reg_info->unspecified_ap_usable,
		   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi),
		   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp),
		   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp));

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] =
			ev->domain_code_6ghz_client_lpi[i];
		reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
			ev->domain_code_6ghz_client_sp[i];
		reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
			ev->domain_code_6ghz_client_vlp[i];

		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n",
			   ath11k_6ghz_client_type_to_str(i),
			   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]),
			   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]),
			   ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i])
			   );
	}

	reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "6 GHz client_type %s 6 GHz super domain %s",
		   ath11k_6ghz_client_type_to_str(reg_info->client_type),
		   ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id));

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n");

	kfree(tb);
	return 0;
}

/* Extract vdev id and peer MAC from a peer-delete-response event. */
static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
					struct wmi_peer_delete_resp_event *peer_del_resp)
{
	const void **tb;
	const struct wmi_peer_delete_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb,
GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	memset(peer_del_resp, 0, sizeof(*peer_del_resp));

	peer_del_resp->vdev_id = ev->vdev_id;
	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
			ev->peer_macaddr.addr);

	kfree(tb);
	return 0;
}

/* Extract the vdev id from a vdev-delete-response event. */
static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_delete_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Extract vdev id and tx status from an offloaded-beacon tx-status event. */
static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id, u32 *tx_status)
{
	const void **tb;
	const struct wmi_bcn_tx_status_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch bcn tx status ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;
	*tx_status = ev->tx_status;

	kfree(tb);
	return 0;
}

/* Extract the vdev id from a vdev-stopped event. */
static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_stopped_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev stop ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* TLV iterator callback: record the mgmt-rx fixed header and the first
 * byte-array TLV (the frame body) into the parse state.
 */
static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_mgmt_rx_parse *parse = data;

	switch (tag) {
	case WMI_TAG_MGMT_RX_HDR:
		parse->fixed = ptr;
		break;
	case WMI_TAG_ARRAY_BYTE:
		/* Only the first byte array is the frame buffer */
		if (!parse->frame_buf_done) {
			parse->frame_buf = ptr;
			parse->frame_buf_done = true;
		}
		break;
	}
	return 0;
}

/* Parse a mgmt-rx event: fill @hdr from the fixed header and reshape
 * @skb so its data points at the received frame itself.
 */
static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
					  struct sk_buff *skb,
					  struct mgmt_rx_event_params *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { };
	const struct wmi_mgmt_rx_hdr *ev;
	const u8 *frame;
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_mgmt_rx_parse,
				  &parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n",
			    ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	if (!ev || !frame) {
		ath11k_warn(ab, "failed to fetch mgmt rx hdr");
		return -EPROTO;
	}

	hdr->pdev_id = ev->pdev_id;
	hdr->chan_freq = ev->chan_freq;
	hdr->channel = ev->channel;
	hdr->snr = ev->snr;
	hdr->rate = ev->rate;
	hdr->phy_mode = ev->phy_mode;
	hdr->buf_len = ev->buf_len;
	hdr->status = ev->status;
	hdr->flags = ev->flags;
	hdr->rssi = ev->rssi;
	hdr->tsf_delta =
ev->tsf_delta;
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* Reject events whose claimed frame length overruns the skb */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	ath11k_ce_byte_swap(skb->data, hdr->buf_len);

	return 0;
}

/* Complete a previously queued mgmt-tx frame: look up the msdu by
 * descriptor id, unmap its DMA buffer and report tx status to mac80211.
 * Returns -ENOENT when the descriptor id is unknown.
 */
static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
				    struct wmi_mgmt_tx_compl_event *tx_compl_param)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	int num_mgmt;

	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id);

	if (!msdu) {
		ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    tx_compl_param->desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH11K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	info->status.rates[0].idx = -1;

	/* status == 0 means the frame was acked (when an ack was expected) */
	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
	    !tx_compl_param->status) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
			     ar->ab->wmi_ab.svc_map))
			info->status.ack_signal = tx_compl_param->ack_rssi;
	}

	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);

	/* WARN when we received this event without doing any mgmt tx */
	if (num_mgmt < 0)
		WARN_ON_ONCE(1);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "mgmt tx comp pending %d desc id %d\n",
		   num_mgmt, tx_compl_param->desc_id);

	if (!num_mgmt)
		wake_up(&ar->txmgmt_empty_waitq);

	return 0;
}

/* Extract the fields of a mgmt-tx-completion event into @param. */
static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
					       struct sk_buff *skb,
					       struct wmi_mgmt_tx_compl_event *param)
{
	const void **tb;
	const struct wmi_mgmt_tx_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
		kfree(tb);
		return -EPROTO;
	}

	param->pdev_id = ev->pdev_id;
	param->desc_id = ev->desc_id;
	param->status = ev->status;
	param->ack_rssi = ev->ack_rssi;

	kfree(tb);
	return 0;
}

/* Scan state machine: STARTING -> RUNNING on "scan started". */
static void ath11k_wmi_event_scan_started(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_STARTING:
		ar->scan.state = ATH11K_SCAN_RUNNING;
		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);
		complete(&ar->scan.started);
		break;
	}
}

/* Scan state machine: tear the scan down if start failed while STARTING. */
static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath11k_mac_scan_finish(ar);
		break;
	}
}

/* Scan state machine: finish the scan on a "completed" event. */
static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		__ath11k_mac_scan_finish(ar);
		break;
	}
}

/* Scan state machine: firmware returned to the BSS (home) channel. */
static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ar->scan_channel = NULL;
		break;
	}
}

/* Scan state machine: firmware moved to a foreign (off-home) channel;
 * also completes a pending remain-on-channel request for that freq.
 */
static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);
		break;
	}
}

/* Human-readable name for a scan event type (and completion reason). */
static const char *
ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
			       enum wmi_scan_completion_reason reason)
{
	switch (type) {
	case WMI_SCAN_EVENT_STARTED:
		return "started";
	case WMI_SCAN_EVENT_COMPLETED:
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			return "completed";
		case WMI_SCAN_REASON_CANCELLED:
			return "completed [cancelled]";
		case WMI_SCAN_REASON_PREEMPTED:
			return "completed [preempted]";
		case WMI_SCAN_REASON_TIMEDOUT:
			return "completed [timedout]";
		case WMI_SCAN_REASON_INTERNAL_FAILURE:
			return "completed [internal err]";
		case WMI_SCAN_REASON_MAX:
			break;
		}
		return "completed [unknown]";
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		return "bss channel";
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		return "foreign channel";
	case WMI_SCAN_EVENT_DEQUEUED:
		return "dequeued";
	case WMI_SCAN_EVENT_PREEMPTED:
		return "preempted";
	case WMI_SCAN_EVENT_START_FAILED:
		return "start failed";
	case WMI_SCAN_EVENT_RESTARTED:
		return "restarted";
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
		return "foreign channel exit";
	default:
		return "unknown";
	}
}

/* Copy a scan event's fields into @scan_evt_param. */
static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_scan_event *scan_evt_param)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_SCAN_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch scan ev");
		kfree(tb);
		return -EPROTO;
	}

	scan_evt_param->event_type = ev->event_type;
	scan_evt_param->reason = ev->reason;
	scan_evt_param->channel_freq = ev->channel_freq;
	scan_evt_param->scan_req_id = ev->scan_req_id;
	scan_evt_param->scan_id = ev->scan_id;
	scan_evt_param->vdev_id = ev->vdev_id;
	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;

	kfree(tb);
	return 0;
}

/* Extract the kicked-out peer's MAC address from a sta-kickout event.
 * NOTE(review): arg->mac_addr points into the event TLV, which is
 * freed with tb below - callers must copy before this returns? The
 * pointer target is the skb data (not tb), but verify lifetime at
 * the call site.
 */
static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
					   struct wmi_peer_sta_kickout_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer sta kickout ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Copy a roam event's vdev id, reason and rssi into @roam_ev. */
static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_roam_event *roam_ev)
{
	const void **tb;
	const struct wmi_roam_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_ROAM_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch roam ev");
		kfree(tb);
		return -EPROTO;
	}

	roam_ev->vdev_id = ev->vdev_id;
	roam_ev->reason = ev->reason;
	roam_ev->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

/* Map a center frequency to a flat channel index across all registered
 * bands. Returns the total channel count if the frequency is not found.
 */
static int freq_to_idx(struct ath11k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = NL80211_BAND_2GHZ;
band < NUM_NL80211_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

/* Copy a channel-info event's fields into @ch_info_ev. */
static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
				    struct wmi_chan_info_event *ch_info_ev)
{
	const void **tb;
	const struct wmi_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	ch_info_ev->err_code = ev->err_code;
	ch_info_ev->freq = ev->freq;
	ch_info_ev->cmd_flags = ev->cmd_flags;
	ch_info_ev->noise_floor = ev->noise_floor;
	ch_info_ev->rx_clear_count = ev->rx_clear_count;
	ch_info_ev->cycle_count = ev->cycle_count;
	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	ch_info_ev->rx_frame_count = ev->rx_frame_count;
	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
	ch_info_ev->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Copy a pdev BSS channel-info event (split low/high cycle counters)
 * into @bss_ch_info_ev.
 */
static int
ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
	const void **tb;
	const struct wmi_pdev_bss_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	bss_ch_info_ev->pdev_id = ev->pdev_id;
	bss_ch_info_ev->freq = ev->freq;
	bss_ch_info_ev->noise_floor = ev->noise_floor;
	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;

	kfree(tb);
	return 0;
}

/* Copy a vdev install-key-complete event into @arg. */
static int
ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
				      struct wmi_vdev_install_key_complete_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_install_key_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev install key compl ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = ev->vdev_id;
	arg->macaddr = ev->peer_macaddr.addr;
	arg->key_idx = ev->key_idx;
	arg->key_flags = ev->key_flags;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}

/* Copy a peer-assoc-confirmation event's vdev id and MAC into @peer_assoc_conf. */
static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
	const void **tb;
	const struct wmi_peer_assoc_conf_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -EPROTO;
	}

	peer_assoc_conf->vdev_id = ev->vdev_id;
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Copy firmware pdev base stats into the driver's fw-stats structure. */
static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
					    struct ath11k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = src->chan_nf;
	dst->tx_frame_count = src->tx_frame_count;
	dst->rx_frame_count = src->rx_frame_count;
	dst->rx_clear_count = src->rx_clear_count;
	dst->cycle_count = src->cycle_count;
	dst->phy_err_count = src->phy_err_count;
	dst->chan_tx_power = src->chan_tx_pwr;
}

/* Copy firmware pdev TX-path stats into the driver's fw-stats structure. */
static void
ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
			      struct ath11k_fw_stats_pdev *dst)
{
	dst->comp_queued = src->comp_queued;
	dst->comp_delivered = src->comp_delivered;
	dst->msdu_enqued = src->msdu_enqued;
	dst->mpdu_enqued = src->mpdu_enqued;
	dst->wmm_drop = src->wmm_drop;
	dst->local_enqued = src->local_enqued;
	dst->local_freed = src->local_freed;
	dst->hw_queued = src->hw_queued;
	dst->hw_reaped = src->hw_reaped;
	dst->underrun = src->underrun;
	dst->hw_paused = src->hw_paused;
	dst->tx_abort = src->tx_abort;
	dst->mpdus_requeued = src->mpdus_requeued;
	dst->tx_ko = src->tx_ko;
	dst->tx_xretry = src->tx_xretry;
	dst->data_rc = src->data_rc;
	dst->self_triggers = src->self_triggers;
	dst->sw_retry_failure = src->sw_retry_failure;
	dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
	dst->pdev_cont_xretry =
src->pdev_cont_xretry; 6509 dst->pdev_tx_timeout = src->pdev_tx_timeout; 6510 dst->pdev_resets = src->pdev_resets; 6511 dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure; 6512 dst->phy_underrun = src->phy_underrun; 6513 dst->txop_ovf = src->txop_ovf; 6514 dst->seq_posted = src->seq_posted; 6515 dst->seq_failed_queueing = src->seq_failed_queueing; 6516 dst->seq_completed = src->seq_completed; 6517 dst->seq_restarted = src->seq_restarted; 6518 dst->mu_seq_posted = src->mu_seq_posted; 6519 dst->mpdus_sw_flush = src->mpdus_sw_flush; 6520 dst->mpdus_hw_filter = src->mpdus_hw_filter; 6521 dst->mpdus_truncated = src->mpdus_truncated; 6522 dst->mpdus_ack_failed = src->mpdus_ack_failed; 6523 dst->mpdus_expired = src->mpdus_expired; 6524 } 6525 6526 static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, 6527 struct ath11k_fw_stats_pdev *dst) 6528 { 6529 dst->mid_ppdu_route_change = src->mid_ppdu_route_change; 6530 dst->status_rcvd = src->status_rcvd; 6531 dst->r0_frags = src->r0_frags; 6532 dst->r1_frags = src->r1_frags; 6533 dst->r2_frags = src->r2_frags; 6534 dst->r3_frags = src->r3_frags; 6535 dst->htt_msdus = src->htt_msdus; 6536 dst->htt_mpdus = src->htt_mpdus; 6537 dst->loc_msdus = src->loc_msdus; 6538 dst->loc_mpdus = src->loc_mpdus; 6539 dst->oversize_amsdu = src->oversize_amsdu; 6540 dst->phy_errs = src->phy_errs; 6541 dst->phy_err_drop = src->phy_err_drop; 6542 dst->mpdu_errs = src->mpdu_errs; 6543 dst->rx_ovfl_errs = src->rx_ovfl_errs; 6544 } 6545 6546 static void 6547 ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src, 6548 struct ath11k_fw_stats_vdev *dst) 6549 { 6550 int i; 6551 6552 dst->vdev_id = src->vdev_id; 6553 dst->beacon_snr = src->beacon_snr; 6554 dst->data_snr = src->data_snr; 6555 dst->num_rx_frames = src->num_rx_frames; 6556 dst->num_rts_fail = src->num_rts_fail; 6557 dst->num_rts_success = src->num_rts_success; 6558 dst->num_rx_err = src->num_rx_err; 6559 dst->num_rx_discard = src->num_rx_discard; 
6560 dst->num_tx_not_acked = src->num_tx_not_acked; 6561 6562 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) 6563 dst->num_tx_frames[i] = src->num_tx_frames[i]; 6564 6565 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) 6566 dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i]; 6567 6568 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) 6569 dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i]; 6570 6571 for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) 6572 dst->tx_rate_history[i] = src->tx_rate_history[i]; 6573 6574 for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) 6575 dst->beacon_rssi_history[i] = src->beacon_rssi_history[i]; 6576 } 6577 6578 static void 6579 ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src, 6580 struct ath11k_fw_stats_bcn *dst) 6581 { 6582 dst->vdev_id = src->vdev_id; 6583 dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt; 6584 dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt; 6585 } 6586 6587 static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, 6588 u16 tag, u16 len, 6589 const void *ptr, void *data) 6590 { 6591 struct wmi_tlv_fw_stats_parse *parse = data; 6592 const struct wmi_stats_event *ev = parse->ev; 6593 struct ath11k_fw_stats *stats = parse->stats; 6594 struct ath11k *ar; 6595 struct ath11k_vif *arvif; 6596 struct ieee80211_sta *sta; 6597 struct ath11k_sta *arsta; 6598 const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr; 6599 int j, ret = 0; 6600 6601 if (tag != WMI_TAG_RSSI_STATS) 6602 return -EPROTO; 6603 6604 rcu_read_lock(); 6605 6606 ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); 6607 stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; 6608 6609 ath11k_dbg(ab, ATH11K_DBG_WMI, 6610 "stats vdev id %d mac %pM\n", 6611 stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr); 6612 6613 arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id); 6614 if (!arvif) { 6615 ath11k_warn(ab, "not found vif for vdev id %d\n", 6616 
stats_rssi->vdev_id); 6617 ret = -EPROTO; 6618 goto exit; 6619 } 6620 6621 ath11k_dbg(ab, ATH11K_DBG_WMI, 6622 "stats bssid %pM vif %p\n", 6623 arvif->bssid, arvif->vif); 6624 6625 sta = ieee80211_find_sta_by_ifaddr(ar->hw, 6626 arvif->bssid, 6627 NULL); 6628 if (!sta) { 6629 ath11k_dbg(ab, ATH11K_DBG_WMI, 6630 "not found station of bssid %pM for rssi chain\n", 6631 arvif->bssid); 6632 goto exit; 6633 } 6634 6635 arsta = ath11k_sta_to_arsta(sta); 6636 6637 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 6638 ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); 6639 6640 for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) { 6641 arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j]; 6642 ath11k_dbg(ab, ATH11K_DBG_WMI, 6643 "stats beacon rssi[%d] %d data rssi[%d] %d\n", 6644 j, 6645 stats_rssi->rssi_avg_beacon[j], 6646 j, 6647 stats_rssi->rssi_avg_data[j]); 6648 } 6649 6650 exit: 6651 rcu_read_unlock(); 6652 return ret; 6653 } 6654 6655 static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, 6656 struct wmi_tlv_fw_stats_parse *parse, 6657 const void *ptr, 6658 u16 len) 6659 { 6660 struct ath11k_fw_stats *stats = parse->stats; 6661 const struct wmi_stats_event *ev = parse->ev; 6662 struct ath11k *ar; 6663 struct ath11k_vif *arvif; 6664 struct ieee80211_sta *sta; 6665 struct ath11k_sta *arsta; 6666 int i, ret = 0; 6667 const void *data = ptr; 6668 6669 if (!ev) { 6670 ath11k_warn(ab, "failed to fetch update stats ev"); 6671 return -EPROTO; 6672 } 6673 6674 stats->stats_id = 0; 6675 6676 rcu_read_lock(); 6677 6678 ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); 6679 6680 for (i = 0; i < ev->num_pdev_stats; i++) { 6681 const struct wmi_pdev_stats *src; 6682 struct ath11k_fw_stats_pdev *dst; 6683 6684 src = data; 6685 if (len < sizeof(*src)) { 6686 ret = -EPROTO; 6687 goto exit; 6688 } 6689 6690 stats->stats_id = WMI_REQUEST_PDEV_STAT; 6691 6692 data += sizeof(*src); 6693 len -= sizeof(*src); 6694 6695 dst = kzalloc_obj(*dst, GFP_ATOMIC); 6696 if (!dst) 6697 
continue; 6698 6699 ath11k_wmi_pull_pdev_stats_base(&src->base, dst); 6700 ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst); 6701 ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst); 6702 list_add_tail(&dst->list, &stats->pdevs); 6703 } 6704 6705 for (i = 0; i < ev->num_vdev_stats; i++) { 6706 const struct wmi_vdev_stats *src; 6707 struct ath11k_fw_stats_vdev *dst; 6708 6709 src = data; 6710 if (len < sizeof(*src)) { 6711 ret = -EPROTO; 6712 goto exit; 6713 } 6714 6715 stats->stats_id = WMI_REQUEST_VDEV_STAT; 6716 6717 arvif = ath11k_mac_get_arvif(ar, src->vdev_id); 6718 if (arvif) { 6719 sta = ieee80211_find_sta_by_ifaddr(ar->hw, 6720 arvif->bssid, 6721 NULL); 6722 if (sta) { 6723 arsta = ath11k_sta_to_arsta(sta); 6724 arsta->rssi_beacon = src->beacon_snr; 6725 ath11k_dbg(ab, ATH11K_DBG_WMI, 6726 "stats vdev id %d snr %d\n", 6727 src->vdev_id, src->beacon_snr); 6728 } else { 6729 ath11k_dbg(ab, ATH11K_DBG_WMI, 6730 "not found station of bssid %pM for vdev stat\n", 6731 arvif->bssid); 6732 } 6733 } 6734 6735 data += sizeof(*src); 6736 len -= sizeof(*src); 6737 6738 dst = kzalloc_obj(*dst, GFP_ATOMIC); 6739 if (!dst) 6740 continue; 6741 6742 ath11k_wmi_pull_vdev_stats(src, dst); 6743 list_add_tail(&dst->list, &stats->vdevs); 6744 } 6745 6746 for (i = 0; i < ev->num_bcn_stats; i++) { 6747 const struct wmi_bcn_stats *src; 6748 struct ath11k_fw_stats_bcn *dst; 6749 6750 src = data; 6751 if (len < sizeof(*src)) { 6752 ret = -EPROTO; 6753 goto exit; 6754 } 6755 6756 stats->stats_id = WMI_REQUEST_BCN_STAT; 6757 6758 data += sizeof(*src); 6759 len -= sizeof(*src); 6760 6761 dst = kzalloc_obj(*dst, GFP_ATOMIC); 6762 if (!dst) 6763 continue; 6764 6765 ath11k_wmi_pull_bcn_stats(src, dst); 6766 list_add_tail(&dst->list, &stats->bcn); 6767 } 6768 6769 exit: 6770 rcu_read_unlock(); 6771 return ret; 6772 } 6773 6774 static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab, 6775 u16 tag, u16 len, 6776 const void *ptr, void *data) 6777 { 6778 struct wmi_tlv_fw_stats_parse *parse = 
data; 6779 int ret = 0; 6780 6781 switch (tag) { 6782 case WMI_TAG_STATS_EVENT: 6783 parse->ev = (struct wmi_stats_event *)ptr; 6784 parse->stats->pdev_id = parse->ev->pdev_id; 6785 break; 6786 case WMI_TAG_ARRAY_BYTE: 6787 ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); 6788 break; 6789 case WMI_TAG_PER_CHAIN_RSSI_STATS: 6790 parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr; 6791 6792 if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT) 6793 parse->rssi_num = parse->rssi->num_per_chain_rssi_stats; 6794 6795 ath11k_dbg(ab, ATH11K_DBG_WMI, 6796 "stats id 0x%x num chain %d\n", 6797 parse->ev->stats_id, 6798 parse->rssi_num); 6799 break; 6800 case WMI_TAG_ARRAY_STRUCT: 6801 if (parse->rssi_num && !parse->chain_rssi_done) { 6802 ret = ath11k_wmi_tlv_iter(ab, ptr, len, 6803 ath11k_wmi_tlv_rssi_chain_parse, 6804 parse); 6805 if (ret) { 6806 ath11k_warn(ab, "failed to parse rssi chain %d\n", 6807 ret); 6808 return ret; 6809 } 6810 parse->chain_rssi_done = true; 6811 } 6812 break; 6813 default: 6814 break; 6815 } 6816 return ret; 6817 } 6818 6819 int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, 6820 struct ath11k_fw_stats *stats) 6821 { 6822 struct wmi_tlv_fw_stats_parse parse = { }; 6823 6824 stats->stats_id = 0; 6825 parse.stats = stats; 6826 6827 return ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 6828 ath11k_wmi_tlv_fw_stats_parse, 6829 &parse); 6830 } 6831 6832 static void 6833 ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev, 6834 char *buf, u32 *length) 6835 { 6836 u32 len = *length; 6837 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 6838 6839 len += scnprintf(buf + len, buf_len - len, "\n"); 6840 len += scnprintf(buf + len, buf_len - len, "%30s\n", 6841 "ath11k PDEV stats"); 6842 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6843 "================="); 6844 6845 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6846 "Channel noise floor", pdev->ch_noise_floor); 6847 len += 
scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6848 "Channel TX power", pdev->chan_tx_power); 6849 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6850 "TX frame count", pdev->tx_frame_count); 6851 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6852 "RX frame count", pdev->rx_frame_count); 6853 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6854 "RX clear count", pdev->rx_clear_count); 6855 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6856 "Cycle count", pdev->cycle_count); 6857 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6858 "PHY error count", pdev->phy_err_count); 6859 6860 *length = len; 6861 } 6862 6863 static void 6864 ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, 6865 char *buf, u32 *length) 6866 { 6867 u32 len = *length; 6868 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 6869 6870 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 6871 "ath11k PDEV TX stats"); 6872 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6873 "===================="); 6874 6875 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6876 "HTT cookies queued", pdev->comp_queued); 6877 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6878 "HTT cookies disp.", pdev->comp_delivered); 6879 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6880 "MSDU queued", pdev->msdu_enqued); 6881 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6882 "MPDU queued", pdev->mpdu_enqued); 6883 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6884 "MSDUs dropped", pdev->wmm_drop); 6885 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6886 "Local enqued", pdev->local_enqued); 6887 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6888 "Local freed", pdev->local_freed); 6889 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6890 "HW queued", pdev->hw_queued); 6891 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6892 "PPDUs reaped", 
pdev->hw_reaped); 6893 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6894 "Num underruns", pdev->underrun); 6895 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6896 "Num HW Paused", pdev->hw_paused); 6897 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6898 "PPDUs cleaned", pdev->tx_abort); 6899 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6900 "MPDUs requeued", pdev->mpdus_requeued); 6901 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6902 "PPDU OK", pdev->tx_ko); 6903 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6904 "Excessive retries", pdev->tx_xretry); 6905 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6906 "HW rate", pdev->data_rc); 6907 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6908 "Sched self triggers", pdev->self_triggers); 6909 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6910 "Dropped due to SW retries", 6911 pdev->sw_retry_failure); 6912 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6913 "Illegal rate phy errors", 6914 pdev->illgl_rate_phy_err); 6915 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6916 "PDEV continuous xretry", pdev->pdev_cont_xretry); 6917 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6918 "TX timeout", pdev->pdev_tx_timeout); 6919 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6920 "PDEV resets", pdev->pdev_resets); 6921 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6922 "Stateless TIDs alloc failures", 6923 pdev->stateless_tid_alloc_failure); 6924 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6925 "PHY underrun", pdev->phy_underrun); 6926 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6927 "MPDU is more than txop limit", pdev->txop_ovf); 6928 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6929 "Num sequences posted", pdev->seq_posted); 6930 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6931 "Num seq failed queueing ", 
pdev->seq_failed_queueing); 6932 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6933 "Num sequences completed ", pdev->seq_completed); 6934 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6935 "Num sequences restarted ", pdev->seq_restarted); 6936 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6937 "Num of MU sequences posted ", pdev->mu_seq_posted); 6938 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6939 "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush); 6940 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6941 "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter); 6942 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6943 "Num of MPDUS truncated ", pdev->mpdus_truncated); 6944 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6945 "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed); 6946 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6947 "Num of MPDUS expired ", pdev->mpdus_expired); 6948 *length = len; 6949 } 6950 6951 static void 6952 ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, 6953 char *buf, u32 *length) 6954 { 6955 u32 len = *length; 6956 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 6957 6958 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 6959 "ath11k PDEV RX stats"); 6960 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6961 "===================="); 6962 6963 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6964 "Mid PPDU route change", 6965 pdev->mid_ppdu_route_change); 6966 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6967 "Tot. 
number of statuses", pdev->status_rcvd); 6968 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6969 "Extra frags on rings 0", pdev->r0_frags); 6970 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6971 "Extra frags on rings 1", pdev->r1_frags); 6972 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6973 "Extra frags on rings 2", pdev->r2_frags); 6974 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6975 "Extra frags on rings 3", pdev->r3_frags); 6976 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6977 "MSDUs delivered to HTT", pdev->htt_msdus); 6978 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6979 "MPDUs delivered to HTT", pdev->htt_mpdus); 6980 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6981 "MSDUs delivered to stack", pdev->loc_msdus); 6982 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6983 "MPDUs delivered to stack", pdev->loc_mpdus); 6984 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6985 "Oversized AMSUs", pdev->oversize_amsdu); 6986 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6987 "PHY errors", pdev->phy_errs); 6988 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6989 "PHY errors drops", pdev->phy_err_drop); 6990 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6991 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); 6992 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6993 "Overflow errors", pdev->rx_ovfl_errs); 6994 *length = len; 6995 } 6996 6997 static void 6998 ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar, 6999 const struct ath11k_fw_stats_vdev *vdev, 7000 char *buf, u32 *length) 7001 { 7002 u32 len = *length; 7003 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 7004 struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id); 7005 u8 *vif_macaddr; 7006 int i; 7007 7008 /* VDEV stats has all the active VDEVs of other PDEVs as well, 7009 * ignoring those not part of requested PDEV 7010 */ 7011 if (!arvif) 7012 
return; 7013 7014 vif_macaddr = arvif->vif->addr; 7015 7016 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7017 "VDEV ID", vdev->vdev_id); 7018 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7019 "VDEV MAC address", vif_macaddr); 7020 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7021 "beacon snr", vdev->beacon_snr); 7022 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7023 "data snr", vdev->data_snr); 7024 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7025 "num rx frames", vdev->num_rx_frames); 7026 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7027 "num rts fail", vdev->num_rts_fail); 7028 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7029 "num rts success", vdev->num_rts_success); 7030 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7031 "num rx err", vdev->num_rx_err); 7032 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7033 "num rx discard", vdev->num_rx_discard); 7034 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7035 "num tx not acked", vdev->num_tx_not_acked); 7036 7037 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) 7038 len += scnprintf(buf + len, buf_len - len, 7039 "%25s [%02d] %u\n", 7040 "num tx frames", i, 7041 vdev->num_tx_frames[i]); 7042 7043 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) 7044 len += scnprintf(buf + len, buf_len - len, 7045 "%25s [%02d] %u\n", 7046 "num tx frames retries", i, 7047 vdev->num_tx_frames_retries[i]); 7048 7049 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) 7050 len += scnprintf(buf + len, buf_len - len, 7051 "%25s [%02d] %u\n", 7052 "num tx frames failures", i, 7053 vdev->num_tx_frames_failures[i]); 7054 7055 for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) 7056 len += scnprintf(buf + len, buf_len - len, 7057 "%25s [%02d] 0x%08x\n", 7058 "tx rate history", i, 7059 vdev->tx_rate_history[i]); 7060 7061 for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) 7062 len += 
scnprintf(buf + len, buf_len - len, 7063 "%25s [%02d] %u\n", 7064 "beacon rssi history", i, 7065 vdev->beacon_rssi_history[i]); 7066 7067 len += scnprintf(buf + len, buf_len - len, "\n"); 7068 *length = len; 7069 } 7070 7071 static void 7072 ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar, 7073 const struct ath11k_fw_stats_bcn *bcn, 7074 char *buf, u32 *length) 7075 { 7076 u32 len = *length; 7077 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 7078 struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id); 7079 u8 *vdev_macaddr; 7080 7081 if (!arvif) { 7082 ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats", 7083 bcn->vdev_id); 7084 return; 7085 } 7086 7087 vdev_macaddr = arvif->vif->addr; 7088 7089 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7090 "VDEV ID", bcn->vdev_id); 7091 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7092 "VDEV MAC address", vdev_macaddr); 7093 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7094 "================"); 7095 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7096 "Num of beacon tx success", bcn->tx_bcn_succ_cnt); 7097 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7098 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); 7099 7100 len += scnprintf(buf + len, buf_len - len, "\n"); 7101 *length = len; 7102 } 7103 7104 void ath11k_wmi_fw_stats_fill(struct ath11k *ar, 7105 struct ath11k_fw_stats *fw_stats, 7106 u32 stats_id, char *buf) 7107 { 7108 u32 len = 0; 7109 u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; 7110 const struct ath11k_fw_stats_pdev *pdev; 7111 const struct ath11k_fw_stats_vdev *vdev; 7112 const struct ath11k_fw_stats_bcn *bcn; 7113 size_t num_bcn; 7114 7115 spin_lock_bh(&ar->data_lock); 7116 7117 if (stats_id == WMI_REQUEST_PDEV_STAT) { 7118 pdev = list_first_entry_or_null(&fw_stats->pdevs, 7119 struct ath11k_fw_stats_pdev, list); 7120 if (!pdev) { 7121 ath11k_warn(ar->ab, "failed to get pdev stats\n"); 7122 goto unlock; 7123 } 7124 7125 
ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); 7126 ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); 7127 ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); 7128 } 7129 7130 if (stats_id == WMI_REQUEST_VDEV_STAT) { 7131 len += scnprintf(buf + len, buf_len - len, "\n"); 7132 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7133 "ath11k VDEV stats"); 7134 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7135 "================="); 7136 7137 list_for_each_entry(vdev, &fw_stats->vdevs, list) 7138 ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len); 7139 } 7140 7141 if (stats_id == WMI_REQUEST_BCN_STAT) { 7142 num_bcn = list_count_nodes(&fw_stats->bcn); 7143 7144 len += scnprintf(buf + len, buf_len - len, "\n"); 7145 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 7146 "ath11k Beacon stats", num_bcn); 7147 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7148 "==================="); 7149 7150 list_for_each_entry(bcn, &fw_stats->bcn, list) 7151 ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len); 7152 } 7153 7154 unlock: 7155 spin_unlock_bh(&ar->data_lock); 7156 7157 if (len >= buf_len) 7158 buf[len - 1] = 0; 7159 else 7160 buf[len] = 0; 7161 } 7162 7163 static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) 7164 { 7165 /* try to send pending beacons first. 
they take priority */ 7166 wake_up(&ab->wmi_ab.tx_credits_wq); 7167 } 7168 7169 static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) 7170 { 7171 const struct wmi_11d_new_cc_ev *ev; 7172 struct ath11k *ar; 7173 struct ath11k_pdev *pdev; 7174 const void **tb; 7175 int ret, i; 7176 7177 tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7178 if (IS_ERR(tb)) { 7179 ret = PTR_ERR(tb); 7180 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); 7181 return ret; 7182 } 7183 7184 ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT]; 7185 if (!ev) { 7186 kfree(tb); 7187 ath11k_warn(ab, "failed to fetch 11d new cc ev"); 7188 return -EPROTO; 7189 } 7190 7191 spin_lock_bh(&ab->base_lock); 7192 memcpy(&ab->new_alpha2, &ev->new_alpha2, 2); 7193 spin_unlock_bh(&ab->base_lock); 7194 7195 ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n", 7196 ab->new_alpha2[0], 7197 ab->new_alpha2[1]); 7198 7199 kfree(tb); 7200 7201 for (i = 0; i < ab->num_radios; i++) { 7202 pdev = &ab->pdevs[i]; 7203 ar = pdev->ar; 7204 ar->state_11d = ATH11K_11D_IDLE; 7205 complete(&ar->completed_11d_scan); 7206 } 7207 7208 queue_work(ab->workqueue, &ab->update_11d_work); 7209 7210 return 0; 7211 } 7212 7213 static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, 7214 struct sk_buff *skb) 7215 { 7216 struct ath11k_pdev_wmi *wmi = NULL; 7217 u32 i; 7218 u8 wmi_ep_count; 7219 u8 eid; 7220 7221 eid = ATH11K_SKB_CB(skb)->eid; 7222 dev_kfree_skb(skb); 7223 7224 if (eid >= ATH11K_HTC_EP_COUNT) 7225 return; 7226 7227 wmi_ep_count = ab->htc.wmi_ep_count; 7228 if (wmi_ep_count > ab->hw_params.max_radios) 7229 return; 7230 7231 for (i = 0; i < ab->htc.wmi_ep_count; i++) { 7232 if (ab->wmi_ab.wmi[i].eid == eid) { 7233 wmi = &ab->wmi_ab.wmi[i]; 7234 break; 7235 } 7236 } 7237 7238 if (wmi) 7239 wake_up(&wmi->tx_ce_desc_wq); 7240 } 7241 7242 static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb, 7243 enum wmi_reg_chan_list_cmd_type id) 7244 { 7245 struct 
cur_regulatory_info *reg_info; 7246 int ret; 7247 7248 reg_info = kzalloc_obj(*reg_info, GFP_ATOMIC); 7249 if (!reg_info) 7250 return -ENOMEM; 7251 7252 if (id == WMI_REG_CHAN_LIST_CC_ID) 7253 ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); 7254 else 7255 ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); 7256 7257 if (ret) { 7258 ath11k_warn(ab, "failed to extract regulatory info\n"); 7259 goto mem_free; 7260 } 7261 7262 ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP); 7263 if (ret) { 7264 ath11k_warn(ab, "failed to process regulatory info %d\n", ret); 7265 goto mem_free; 7266 } 7267 7268 kfree(reg_info); 7269 return 0; 7270 7271 mem_free: 7272 ath11k_reg_reset_info(reg_info); 7273 kfree(reg_info); 7274 return ret; 7275 } 7276 7277 static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, 7278 const void *ptr, void *data) 7279 { 7280 struct wmi_tlv_rdy_parse *rdy_parse = data; 7281 struct wmi_ready_event fixed_param; 7282 struct wmi_mac_addr *addr_list; 7283 struct ath11k_pdev *pdev; 7284 u32 num_mac_addr; 7285 int i; 7286 7287 switch (tag) { 7288 case WMI_TAG_READY_EVENT: 7289 memset(&fixed_param, 0, sizeof(fixed_param)); 7290 memcpy(&fixed_param, (struct wmi_ready_event *)ptr, 7291 min_t(u16, sizeof(fixed_param), len)); 7292 rdy_parse->num_extra_mac_addr = 7293 fixed_param.ready_event_min.num_extra_mac_addr; 7294 7295 ether_addr_copy(ab->mac_addr, 7296 fixed_param.ready_event_min.mac_addr.addr); 7297 ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; 7298 break; 7299 case WMI_TAG_ARRAY_FIXED_STRUCT: 7300 addr_list = (struct wmi_mac_addr *)ptr; 7301 num_mac_addr = rdy_parse->num_extra_mac_addr; 7302 7303 if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) 7304 break; 7305 7306 for (i = 0; i < ab->num_radios; i++) { 7307 pdev = &ab->pdevs[i]; 7308 ether_addr_copy(pdev->mac_addr, addr_list[i].addr); 7309 } 7310 ab->pdevs_macaddr_valid = true; 7311 break; 7312 default: 7313 
break; 7314 } 7315 7316 return 0; 7317 } 7318 7319 static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) 7320 { 7321 struct wmi_tlv_rdy_parse rdy_parse = { }; 7322 int ret; 7323 7324 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 7325 ath11k_wmi_tlv_rdy_parse, &rdy_parse); 7326 if (ret) { 7327 ath11k_warn(ab, "failed to parse tlv %d\n", ret); 7328 return ret; 7329 } 7330 7331 ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready"); 7332 7333 complete(&ab->wmi_ab.unified_ready); 7334 return 0; 7335 } 7336 7337 static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) 7338 { 7339 struct wmi_peer_delete_resp_event peer_del_resp; 7340 struct ath11k *ar; 7341 7342 if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { 7343 ath11k_warn(ab, "failed to extract peer delete resp"); 7344 return; 7345 } 7346 7347 ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp"); 7348 7349 rcu_read_lock(); 7350 ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id); 7351 if (!ar) { 7352 ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d", 7353 peer_del_resp.vdev_id); 7354 rcu_read_unlock(); 7355 return; 7356 } 7357 7358 complete(&ar->peer_delete_done); 7359 rcu_read_unlock(); 7360 ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", 7361 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); 7362 } 7363 7364 static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab, 7365 struct sk_buff *skb) 7366 { 7367 struct ath11k *ar; 7368 u32 vdev_id = 0; 7369 7370 if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { 7371 ath11k_warn(ab, "failed to extract vdev delete resp"); 7372 return; 7373 } 7374 7375 rcu_read_lock(); 7376 ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); 7377 if (!ar) { 7378 ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d", 7379 vdev_id); 7380 rcu_read_unlock(); 7381 return; 7382 } 7383 7384 complete(&ar->vdev_delete_done); 7385 7386 rcu_read_unlock(); 7387 
7388 ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n", 7389 vdev_id); 7390 } 7391 7392 static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status) 7393 { 7394 switch (vdev_resp_status) { 7395 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: 7396 return "invalid vdev id"; 7397 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: 7398 return "not supported"; 7399 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: 7400 return "dfs violation"; 7401 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: 7402 return "invalid regdomain"; 7403 default: 7404 return "unknown"; 7405 } 7406 } 7407 7408 static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb) 7409 { 7410 struct wmi_vdev_start_resp_event vdev_start_resp; 7411 struct ath11k *ar; 7412 u32 status; 7413 7414 if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { 7415 ath11k_warn(ab, "failed to extract vdev start resp"); 7416 return; 7417 } 7418 7419 ath11k_dbg(ab, ATH11K_DBG_WMI, "event start resp event"); 7420 7421 rcu_read_lock(); 7422 ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id); 7423 if (!ar) { 7424 ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d", 7425 vdev_start_resp.vdev_id); 7426 rcu_read_unlock(); 7427 return; 7428 } 7429 7430 ar->last_wmi_vdev_start_status = 0; 7431 ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; 7432 status = vdev_start_resp.status; 7433 7434 if (WARN_ON_ONCE(status)) { 7435 ath11k_warn(ab, "vdev start resp error status %d (%s)\n", 7436 status, ath11k_wmi_vdev_resp_print(status)); 7437 ar->last_wmi_vdev_start_status = status; 7438 } 7439 7440 complete(&ar->vdev_setup_done); 7441 7442 rcu_read_unlock(); 7443 7444 ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d", 7445 vdev_start_resp.vdev_id); 7446 } 7447 7448 static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) 7449 { 7450 struct ath11k_vif *arvif; 7451 u32 vdev_id, tx_status; 7452 7453 if 
(ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 7454 ath11k_warn(ab, "failed to extract bcn tx status"); 7455 return; 7456 } 7457 7458 ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status"); 7459 7460 rcu_read_lock(); 7461 arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7462 if (!arvif) { 7463 ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status", 7464 vdev_id); 7465 rcu_read_unlock(); 7466 return; 7467 } 7468 7469 queue_work(ab->workqueue, &arvif->bcn_tx_work); 7470 7471 rcu_read_unlock(); 7472 } 7473 7474 static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, 7475 struct sk_buff *skb) 7476 { 7477 const struct wmi_peer_sta_ps_state_chg_event *ev; 7478 struct ieee80211_sta *sta; 7479 struct ath11k_peer *peer; 7480 struct ath11k *ar; 7481 struct ath11k_sta *arsta; 7482 const void **tb; 7483 enum ath11k_wmi_peer_ps_state peer_previous_ps_state; 7484 int ret; 7485 7486 tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7487 if (IS_ERR(tb)) { 7488 ret = PTR_ERR(tb); 7489 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); 7490 return; 7491 } 7492 7493 ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT]; 7494 if (!ev) { 7495 ath11k_warn(ab, "failed to fetch sta ps change ev"); 7496 kfree(tb); 7497 return; 7498 } 7499 7500 ath11k_dbg(ab, ATH11K_DBG_WMI, 7501 "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n", 7502 ev->peer_macaddr.addr, ev->peer_ps_state, 7503 ev->ps_supported_bitmap, ev->peer_ps_valid, 7504 ev->peer_ps_timestamp); 7505 7506 rcu_read_lock(); 7507 7508 spin_lock_bh(&ab->base_lock); 7509 7510 peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr); 7511 7512 if (!peer) { 7513 spin_unlock_bh(&ab->base_lock); 7514 ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr); 7515 goto exit; 7516 } 7517 7518 ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); 7519 7520 if (!ar) { 7521 spin_unlock_bh(&ab->base_lock); 7522 ath11k_warn(ab, "invalid vdev id in peer sta ps 
state change ev %d", 7523 peer->vdev_id); 7524 7525 goto exit; 7526 } 7527 7528 sta = peer->sta; 7529 7530 spin_unlock_bh(&ab->base_lock); 7531 7532 if (!sta) { 7533 ath11k_warn(ab, "failed to find station entry %pM\n", 7534 ev->peer_macaddr.addr); 7535 goto exit; 7536 } 7537 7538 arsta = ath11k_sta_to_arsta(sta); 7539 7540 spin_lock_bh(&ar->data_lock); 7541 7542 peer_previous_ps_state = arsta->peer_ps_state; 7543 arsta->peer_ps_state = ev->peer_ps_state; 7544 arsta->peer_current_ps_valid = !!ev->peer_ps_valid; 7545 7546 if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, 7547 ar->ab->wmi_ab.svc_map)) { 7548 if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) || 7549 !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) || 7550 !ev->peer_ps_valid) 7551 goto out; 7552 7553 if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) { 7554 arsta->ps_start_time = ev->peer_ps_timestamp; 7555 arsta->ps_start_jiffies = jiffies; 7556 } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF && 7557 peer_previous_ps_state == WMI_PEER_PS_STATE_ON) { 7558 arsta->ps_total_duration = arsta->ps_total_duration + 7559 (ev->peer_ps_timestamp - arsta->ps_start_time); 7560 } 7561 7562 if (ar->ps_timekeeper_enable) 7563 trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr, 7564 ev->peer_ps_timestamp, 7565 arsta->peer_ps_state); 7566 } 7567 7568 out: 7569 spin_unlock_bh(&ar->data_lock); 7570 exit: 7571 rcu_read_unlock(); 7572 kfree(tb); 7573 } 7574 7575 static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) 7576 { 7577 struct ath11k *ar; 7578 u32 vdev_id = 0; 7579 7580 if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { 7581 ath11k_warn(ab, "failed to extract vdev stopped event"); 7582 return; 7583 } 7584 7585 ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped"); 7586 7587 rcu_read_lock(); 7588 ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); 7589 if (!ar) { 7590 ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d", 7591 vdev_id); 7592 
rcu_read_unlock(); 7593 return; 7594 } 7595 7596 complete(&ar->vdev_setup_done); 7597 7598 rcu_read_unlock(); 7599 7600 ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); 7601 } 7602 7603 static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) 7604 { 7605 struct mgmt_rx_event_params rx_ev = {}; 7606 struct ath11k *ar; 7607 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 7608 struct ieee80211_hdr *hdr; 7609 u16 fc; 7610 struct ieee80211_supported_band *sband; 7611 7612 if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { 7613 ath11k_warn(ab, "failed to extract mgmt rx event"); 7614 dev_kfree_skb(skb); 7615 return; 7616 } 7617 7618 memset(status, 0, sizeof(*status)); 7619 7620 ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n", 7621 rx_ev.status); 7622 7623 rcu_read_lock(); 7624 ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); 7625 7626 if (!ar) { 7627 ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", 7628 rx_ev.pdev_id); 7629 dev_kfree_skb(skb); 7630 goto exit; 7631 } 7632 7633 if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) || 7634 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 7635 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { 7636 dev_kfree_skb(skb); 7637 goto exit; 7638 } 7639 7640 if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) 7641 status->flag |= RX_FLAG_MMIC_ERROR; 7642 7643 if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ && 7644 rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) { 7645 status->band = NL80211_BAND_6GHZ; 7646 status->freq = rx_ev.chan_freq; 7647 } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { 7648 status->band = NL80211_BAND_2GHZ; 7649 } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) { 7650 status->band = NL80211_BAND_5GHZ; 7651 } else { 7652 /* Shouldn't happen unless list of advertised channels to 7653 * mac80211 has been changed. 
7654 */ 7655 WARN_ON_ONCE(1); 7656 dev_kfree_skb(skb); 7657 goto exit; 7658 } 7659 7660 if (rx_ev.phy_mode == MODE_11B && 7661 (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) 7662 ath11k_dbg(ab, ATH11K_DBG_WMI, 7663 "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); 7664 7665 sband = &ar->mac.sbands[status->band]; 7666 7667 if (status->band != NL80211_BAND_6GHZ) 7668 status->freq = ieee80211_channel_to_frequency(rx_ev.channel, 7669 status->band); 7670 7671 status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR; 7672 status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); 7673 7674 hdr = (struct ieee80211_hdr *)skb->data; 7675 fc = le16_to_cpu(hdr->frame_control); 7676 7677 /* Firmware is guaranteed to report all essential management frames via 7678 * WMI while it can deliver some extra via HTT. Since there can be 7679 * duplicates split the reporting wrt monitor/sniffing. 7680 */ 7681 status->flag |= RX_FLAG_SKIP_MONITOR; 7682 7683 /* In case of PMF, FW delivers decrypted frames with Protected Bit set. 7684 * Don't clear that. Also, FW delivers broadcast management frames 7685 * (ex: group privacy action frames in mesh) as encrypted payload. 
7686 */ 7687 if (ieee80211_has_protected(hdr->frame_control) && 7688 !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { 7689 status->flag |= RX_FLAG_DECRYPTED; 7690 7691 if (!ieee80211_is_robust_mgmt_frame(skb)) { 7692 status->flag |= RX_FLAG_IV_STRIPPED | 7693 RX_FLAG_MMIC_STRIPPED; 7694 hdr->frame_control = __cpu_to_le16(fc & 7695 ~IEEE80211_FCTL_PROTECTED); 7696 } 7697 } 7698 7699 if (ieee80211_is_beacon(hdr->frame_control)) 7700 ath11k_mac_handle_beacon(ar, skb); 7701 7702 ath11k_dbg(ab, ATH11K_DBG_MGMT, 7703 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 7704 skb, skb->len, 7705 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 7706 7707 ath11k_dbg(ab, ATH11K_DBG_MGMT, 7708 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 7709 status->freq, status->band, status->signal, 7710 status->rate_idx); 7711 7712 ieee80211_rx_ni(ar->hw, skb); 7713 7714 exit: 7715 rcu_read_unlock(); 7716 } 7717 7718 static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb) 7719 { 7720 struct wmi_mgmt_tx_compl_event tx_compl_param = {}; 7721 struct ath11k *ar; 7722 7723 if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { 7724 ath11k_warn(ab, "failed to extract mgmt tx compl event"); 7725 return; 7726 } 7727 7728 rcu_read_lock(); 7729 ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id); 7730 if (!ar) { 7731 ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", 7732 tx_compl_param.pdev_id); 7733 goto exit; 7734 } 7735 7736 wmi_process_mgmt_tx_comp(ar, &tx_compl_param); 7737 7738 ath11k_dbg(ab, ATH11K_DBG_MGMT, 7739 "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d", 7740 tx_compl_param.pdev_id, tx_compl_param.desc_id, 7741 tx_compl_param.status, tx_compl_param.ack_rssi); 7742 7743 exit: 7744 rcu_read_unlock(); 7745 } 7746 7747 static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab, 7748 u32 vdev_id, 7749 enum ath11k_scan_state state) 7750 { 7751 int i; 7752 struct 
ath11k_pdev *pdev; 7753 struct ath11k *ar; 7754 7755 for (i = 0; i < ab->num_radios; i++) { 7756 pdev = rcu_dereference(ab->pdevs_active[i]); 7757 if (pdev && pdev->ar) { 7758 ar = pdev->ar; 7759 7760 spin_lock_bh(&ar->data_lock); 7761 if (ar->scan.state == state && 7762 ar->scan.vdev_id == vdev_id) { 7763 spin_unlock_bh(&ar->data_lock); 7764 return ar; 7765 } 7766 spin_unlock_bh(&ar->data_lock); 7767 } 7768 } 7769 return NULL; 7770 } 7771 7772 static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) 7773 { 7774 struct ath11k *ar; 7775 struct wmi_scan_event scan_ev = {}; 7776 7777 if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) { 7778 ath11k_warn(ab, "failed to extract scan event"); 7779 return; 7780 } 7781 7782 rcu_read_lock(); 7783 7784 /* In case the scan was cancelled, ex. during interface teardown, 7785 * the interface will not be found in active interfaces. 7786 * Rather, in such scenarios, iterate over the active pdev's to 7787 * search 'ar' if the corresponding 'ar' scan is ABORTING and the 7788 * aborting scan's vdev id matches this event info. 
7789 */ 7790 if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED && 7791 scan_ev.reason == WMI_SCAN_REASON_CANCELLED) { 7792 ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, 7793 ATH11K_SCAN_ABORTING); 7794 if (!ar) 7795 ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, 7796 ATH11K_SCAN_RUNNING); 7797 } else { 7798 ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id); 7799 } 7800 7801 if (!ar) { 7802 ath11k_warn(ab, "Received scan event for unknown vdev"); 7803 rcu_read_unlock(); 7804 return; 7805 } 7806 7807 spin_lock_bh(&ar->data_lock); 7808 7809 ath11k_dbg(ab, ATH11K_DBG_WMI, 7810 "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", 7811 ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason), 7812 scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq, 7813 scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id, 7814 ath11k_scan_state_str(ar->scan.state), ar->scan.state); 7815 7816 switch (scan_ev.event_type) { 7817 case WMI_SCAN_EVENT_STARTED: 7818 ath11k_wmi_event_scan_started(ar); 7819 break; 7820 case WMI_SCAN_EVENT_COMPLETED: 7821 ath11k_wmi_event_scan_completed(ar); 7822 break; 7823 case WMI_SCAN_EVENT_BSS_CHANNEL: 7824 ath11k_wmi_event_scan_bss_chan(ar); 7825 break; 7826 case WMI_SCAN_EVENT_FOREIGN_CHAN: 7827 ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq); 7828 break; 7829 case WMI_SCAN_EVENT_START_FAILED: 7830 ath11k_warn(ab, "received scan start failure event\n"); 7831 ath11k_wmi_event_scan_start_failed(ar); 7832 break; 7833 case WMI_SCAN_EVENT_DEQUEUED: 7834 __ath11k_mac_scan_finish(ar); 7835 break; 7836 case WMI_SCAN_EVENT_PREEMPTED: 7837 case WMI_SCAN_EVENT_RESTARTED: 7838 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 7839 default: 7840 break; 7841 } 7842 7843 spin_unlock_bh(&ar->data_lock); 7844 7845 rcu_read_unlock(); 7846 } 7847 7848 static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb) 7849 { 7850 struct wmi_peer_sta_kickout_arg arg = {}; 7851 
struct ieee80211_sta *sta; 7852 struct ath11k_peer *peer; 7853 struct ath11k *ar; 7854 u32 vdev_id; 7855 7856 if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { 7857 ath11k_warn(ab, "failed to extract peer sta kickout event"); 7858 return; 7859 } 7860 7861 rcu_read_lock(); 7862 7863 spin_lock_bh(&ab->base_lock); 7864 7865 peer = ath11k_peer_find_by_addr(ab, arg.mac_addr); 7866 7867 if (!peer) { 7868 ath11k_warn(ab, "peer not found %pM\n", 7869 arg.mac_addr); 7870 spin_unlock_bh(&ab->base_lock); 7871 goto exit; 7872 } 7873 7874 vdev_id = peer->vdev_id; 7875 7876 spin_unlock_bh(&ab->base_lock); 7877 7878 ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); 7879 if (!ar) { 7880 ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", 7881 peer->vdev_id); 7882 goto exit; 7883 } 7884 7885 sta = ieee80211_find_sta_by_ifaddr(ar->hw, 7886 arg.mac_addr, NULL); 7887 if (!sta) { 7888 ath11k_warn(ab, "Spurious quick kickout for STA %pM\n", 7889 arg.mac_addr); 7890 goto exit; 7891 } 7892 7893 ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM", 7894 arg.mac_addr); 7895 7896 ieee80211_report_low_ack(sta, 10); 7897 7898 exit: 7899 rcu_read_unlock(); 7900 } 7901 7902 static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) 7903 { 7904 struct wmi_roam_event roam_ev = {}; 7905 struct ath11k *ar; 7906 7907 if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) { 7908 ath11k_warn(ab, "failed to extract roam event"); 7909 return; 7910 } 7911 7912 ath11k_dbg(ab, ATH11K_DBG_WMI, 7913 "event roam vdev %u reason 0x%08x rssi %d\n", 7914 roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); 7915 7916 rcu_read_lock(); 7917 ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id); 7918 if (!ar) { 7919 ath11k_warn(ab, "invalid vdev id in roam ev %d", 7920 roam_ev.vdev_id); 7921 rcu_read_unlock(); 7922 return; 7923 } 7924 7925 if (roam_ev.reason >= WMI_ROAM_REASON_MAX) 7926 ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", 7927 roam_ev.reason, 
roam_ev.vdev_id); 7928 7929 switch (roam_ev.reason) { 7930 case WMI_ROAM_REASON_BEACON_MISS: 7931 ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id); 7932 break; 7933 case WMI_ROAM_REASON_BETTER_AP: 7934 case WMI_ROAM_REASON_LOW_RSSI: 7935 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 7936 case WMI_ROAM_REASON_HO_FAILED: 7937 ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", 7938 roam_ev.reason, roam_ev.vdev_id); 7939 break; 7940 } 7941 7942 rcu_read_unlock(); 7943 } 7944 7945 static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) 7946 { 7947 struct wmi_chan_info_event ch_info_ev = {}; 7948 struct ath11k *ar; 7949 struct survey_info *survey; 7950 int idx; 7951 /* HW channel counters frequency value in hertz */ 7952 u32 cc_freq_hz = ab->cc_freq_hz; 7953 7954 if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7955 ath11k_warn(ab, "failed to extract chan info event"); 7956 return; 7957 } 7958 7959 ath11k_dbg(ab, ATH11K_DBG_WMI, 7960 "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 7961 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 7962 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 7963 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 7964 ch_info_ev.mac_clk_mhz); 7965 7966 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) { 7967 ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n"); 7968 return; 7969 } 7970 7971 rcu_read_lock(); 7972 ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id); 7973 if (!ar) { 7974 ath11k_warn(ab, "invalid vdev id in chan info ev %d", 7975 ch_info_ev.vdev_id); 7976 rcu_read_unlock(); 7977 return; 7978 } 7979 spin_lock_bh(&ar->data_lock); 7980 7981 switch (ar->scan.state) { 7982 case ATH11K_SCAN_IDLE: 7983 case ATH11K_SCAN_STARTING: 7984 ath11k_warn(ab, "received chan info event without a scan request, ignoring\n"); 7985 goto exit; 7986 case ATH11K_SCAN_RUNNING: 7987 case 
ATH11K_SCAN_ABORTING: 7988 break; 7989 } 7990 7991 idx = freq_to_idx(ar, ch_info_ev.freq); 7992 if (idx >= ARRAY_SIZE(ar->survey)) { 7993 ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", 7994 ch_info_ev.freq, idx); 7995 goto exit; 7996 } 7997 7998 /* If FW provides MAC clock frequency in Mhz, overriding the initialized 7999 * HW channel counters frequency value 8000 */ 8001 if (ch_info_ev.mac_clk_mhz) 8002 cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000); 8003 8004 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { 8005 survey = &ar->survey[idx]; 8006 memset(survey, 0, sizeof(*survey)); 8007 survey->noise = ch_info_ev.noise_floor; 8008 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 8009 SURVEY_INFO_TIME_BUSY; 8010 survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz); 8011 survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz); 8012 } 8013 exit: 8014 spin_unlock_bh(&ar->data_lock); 8015 rcu_read_unlock(); 8016 } 8017 8018 static void 8019 ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) 8020 { 8021 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; 8022 struct survey_info *survey; 8023 struct ath11k *ar; 8024 u32 cc_freq_hz = ab->cc_freq_hz; 8025 u64 busy, total, tx, rx, rx_bss; 8026 int idx; 8027 8028 if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { 8029 ath11k_warn(ab, "failed to extract pdev bss chan info event"); 8030 return; 8031 } 8032 8033 busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 | 8034 bss_ch_info_ev.rx_clear_count_low; 8035 8036 total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 | 8037 bss_ch_info_ev.cycle_count_low; 8038 8039 tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 | 8040 bss_ch_info_ev.tx_cycle_count_low; 8041 8042 rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 | 8043 bss_ch_info_ev.rx_cycle_count_low; 8044 8045 rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 | 8046 
bss_ch_info_ev.rx_bss_cycle_count_low; 8047 8048 ath11k_dbg(ab, ATH11K_DBG_WMI, 8049 "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 8050 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, 8051 bss_ch_info_ev.noise_floor, busy, total, 8052 tx, rx, rx_bss); 8053 8054 rcu_read_lock(); 8055 ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id); 8056 8057 if (!ar) { 8058 ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", 8059 bss_ch_info_ev.pdev_id); 8060 rcu_read_unlock(); 8061 return; 8062 } 8063 8064 spin_lock_bh(&ar->data_lock); 8065 idx = freq_to_idx(ar, bss_ch_info_ev.freq); 8066 if (idx >= ARRAY_SIZE(ar->survey)) { 8067 ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 8068 bss_ch_info_ev.freq, idx); 8069 goto exit; 8070 } 8071 8072 survey = &ar->survey[idx]; 8073 8074 survey->noise = bss_ch_info_ev.noise_floor; 8075 survey->time = div_u64(total, cc_freq_hz); 8076 survey->time_busy = div_u64(busy, cc_freq_hz); 8077 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 8078 survey->time_tx = div_u64(tx, cc_freq_hz); 8079 survey->filled |= (SURVEY_INFO_NOISE_DBM | 8080 SURVEY_INFO_TIME | 8081 SURVEY_INFO_TIME_BUSY | 8082 SURVEY_INFO_TIME_RX | 8083 SURVEY_INFO_TIME_TX); 8084 exit: 8085 spin_unlock_bh(&ar->data_lock); 8086 complete(&ar->bss_survey_done); 8087 8088 rcu_read_unlock(); 8089 } 8090 8091 static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, 8092 struct sk_buff *skb) 8093 { 8094 struct wmi_vdev_install_key_complete_arg install_key_compl = {}; 8095 struct ath11k *ar; 8096 8097 if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 8098 ath11k_warn(ab, "failed to extract install key compl event"); 8099 return; 8100 } 8101 8102 ath11k_dbg(ab, ATH11K_DBG_WMI, 8103 "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 8104 install_key_compl.key_idx, install_key_compl.key_flags, 8105 
install_key_compl.macaddr, install_key_compl.status); 8106 8107 rcu_read_lock(); 8108 ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 8109 if (!ar) { 8110 ath11k_warn(ab, "invalid vdev id in install key compl ev %d", 8111 install_key_compl.vdev_id); 8112 rcu_read_unlock(); 8113 return; 8114 } 8115 8116 ar->install_key_status = 0; 8117 8118 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 8119 ath11k_warn(ab, "install key failed for %pM status %d\n", 8120 install_key_compl.macaddr, install_key_compl.status); 8121 ar->install_key_status = install_key_compl.status; 8122 } 8123 8124 complete(&ar->install_key_done); 8125 rcu_read_unlock(); 8126 } 8127 8128 static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab, 8129 u16 tag, u16 len, 8130 const void *ptr, void *data) 8131 { 8132 const struct wmi_service_available_event *ev; 8133 u32 *wmi_ext2_service_bitmap; 8134 int i, j; 8135 8136 switch (tag) { 8137 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 8138 ev = (struct wmi_service_available_event *)ptr; 8139 for (i = 0, j = WMI_MAX_SERVICE; 8140 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 8141 i++) { 8142 do { 8143 if (ev->wmi_service_segment_bitmap[i] & 8144 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 8145 set_bit(j, ab->wmi_ab.svc_map); 8146 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 8147 } 8148 8149 ath11k_dbg(ab, ATH11K_DBG_WMI, 8150 "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", 8151 ev->wmi_service_segment_bitmap[0], 8152 ev->wmi_service_segment_bitmap[1], 8153 ev->wmi_service_segment_bitmap[2], 8154 ev->wmi_service_segment_bitmap[3]); 8155 break; 8156 case WMI_TAG_ARRAY_UINT32: 8157 wmi_ext2_service_bitmap = (u32 *)ptr; 8158 for (i = 0, j = WMI_MAX_EXT_SERVICE; 8159 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; 8160 i++) { 8161 do { 8162 if (wmi_ext2_service_bitmap[i] & 8163 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 8164 set_bit(j, ab->wmi_ab.svc_map); 8165 } 
while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 8166 } 8167 8168 ath11k_dbg(ab, ATH11K_DBG_WMI, 8169 "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", 8170 wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], 8171 wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); 8172 break; 8173 } 8174 return 0; 8175 } 8176 8177 static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) 8178 { 8179 int ret; 8180 8181 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 8182 ath11k_wmi_tlv_services_parser, 8183 NULL); 8184 if (ret) 8185 ath11k_warn(ab, "failed to parse services available tlv %d\n", ret); 8186 8187 ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available"); 8188 } 8189 8190 static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) 8191 { 8192 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; 8193 struct ath11k *ar; 8194 8195 if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 8196 ath11k_warn(ab, "failed to extract peer assoc conf event"); 8197 return; 8198 } 8199 8200 ath11k_dbg(ab, ATH11K_DBG_WMI, 8201 "event peer assoc conf ev vdev id %d macaddr %pM\n", 8202 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 8203 8204 rcu_read_lock(); 8205 ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 8206 8207 if (!ar) { 8208 ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 8209 peer_assoc_conf.vdev_id); 8210 rcu_read_unlock(); 8211 return; 8212 } 8213 8214 complete(&ar->peer_assoc_done); 8215 rcu_read_unlock(); 8216 } 8217 8218 static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) 8219 { 8220 struct ath11k_fw_stats stats = {}; 8221 size_t total_vdevs_started = 0; 8222 struct ath11k_pdev *pdev; 8223 bool is_end = true; 8224 int i; 8225 8226 struct ath11k *ar; 8227 int ret; 8228 8229 INIT_LIST_HEAD(&stats.pdevs); 8230 INIT_LIST_HEAD(&stats.vdevs); 8231 INIT_LIST_HEAD(&stats.bcn); 8232 8233 ret = 
ath11k_wmi_pull_fw_stats(ab, skb, &stats); 8234 if (ret) { 8235 ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); 8236 goto free; 8237 } 8238 8239 ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats"); 8240 8241 rcu_read_lock(); 8242 ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); 8243 if (!ar) { 8244 rcu_read_unlock(); 8245 ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n", 8246 stats.pdev_id, ret); 8247 goto free; 8248 } 8249 8250 spin_lock_bh(&ar->data_lock); 8251 8252 /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and 8253 * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via 8254 * debugfs fw stats. Therefore, processing it separately. 8255 */ 8256 if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { 8257 list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); 8258 complete(&ar->fw_stats_done); 8259 goto complete; 8260 } 8261 8262 if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { 8263 complete(&ar->fw_stats_done); 8264 goto complete; 8265 } 8266 8267 if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { 8268 if (list_empty(&stats.vdevs)) { 8269 ath11k_warn(ab, "empty vdev stats"); 8270 goto complete; 8271 } 8272 /* FW sends all the active VDEV stats irrespective of PDEV, 8273 * hence limit until the count of all VDEVs started 8274 */ 8275 for (i = 0; i < ab->num_radios; i++) { 8276 pdev = rcu_dereference(ab->pdevs_active[i]); 8277 if (pdev && pdev->ar) 8278 total_vdevs_started += ar->num_started_vdevs; 8279 } 8280 8281 if (total_vdevs_started) 8282 is_end = ((++ar->fw_stats.num_vdev_recvd) == 8283 total_vdevs_started); 8284 8285 list_splice_tail_init(&stats.vdevs, 8286 &ar->fw_stats.vdevs); 8287 8288 if (is_end) 8289 complete(&ar->fw_stats_done); 8290 8291 goto complete; 8292 } 8293 8294 /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats. 
8295 * Hence, processing it in debugfs context 8296 */ 8297 ath11k_debugfs_fw_stats_process(ar, &stats); 8298 8299 complete: 8300 complete(&ar->fw_stats_complete); 8301 spin_unlock_bh(&ar->data_lock); 8302 rcu_read_unlock(); 8303 8304 /* Since the stats's pdev, vdev and beacon list are spliced and reinitialised 8305 * at this point, no need to free the individual list. 8306 */ 8307 return; 8308 8309 free: 8310 ath11k_fw_stats_free(&stats); 8311 } 8312 8313 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned 8314 * is not part of BDF CTL(Conformance test limits) table entries. 8315 */ 8316 static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, 8317 struct sk_buff *skb) 8318 { 8319 const void **tb; 8320 const struct wmi_pdev_ctl_failsafe_chk_event *ev; 8321 int ret; 8322 8323 tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8324 if (IS_ERR(tb)) { 8325 ret = PTR_ERR(tb); 8326 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); 8327 return; 8328 } 8329 8330 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; 8331 if (!ev) { 8332 ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); 8333 kfree(tb); 8334 return; 8335 } 8336 8337 ath11k_dbg(ab, ATH11K_DBG_WMI, 8338 "event pdev ctl failsafe check status %d\n", 8339 ev->ctl_failsafe_status); 8340 8341 /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power 8342 * to 10 dBm else the CTL power entry in the BDF would be picked up. 
8343 */ 8344 if (ev->ctl_failsafe_status != 0) 8345 ath11k_warn(ab, "pdev ctl failsafe failure status %d", 8346 ev->ctl_failsafe_status); 8347 8348 kfree(tb); 8349 } 8350 8351 static void 8352 ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab, 8353 const struct wmi_pdev_csa_switch_ev *ev, 8354 const u32 *vdev_ids) 8355 { 8356 int i; 8357 struct ath11k_vif *arvif; 8358 8359 /* Finish CSA once the switch count becomes NULL */ 8360 if (ev->current_switch_count) 8361 return; 8362 8363 rcu_read_lock(); 8364 for (i = 0; i < ev->num_vdevs; i++) { 8365 arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); 8366 8367 if (!arvif) { 8368 ath11k_warn(ab, "Recvd csa status for unknown vdev %d", 8369 vdev_ids[i]); 8370 continue; 8371 } 8372 8373 if (arvif->is_up && arvif->vif->bss_conf.csa_active) 8374 ieee80211_csa_finish(arvif->vif, 0); 8375 } 8376 rcu_read_unlock(); 8377 } 8378 8379 static void 8380 ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, 8381 struct sk_buff *skb) 8382 { 8383 const void **tb; 8384 const struct wmi_pdev_csa_switch_ev *ev; 8385 const u32 *vdev_ids; 8386 int ret; 8387 8388 tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8389 if (IS_ERR(tb)) { 8390 ret = PTR_ERR(tb); 8391 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); 8392 return; 8393 } 8394 8395 ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; 8396 vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; 8397 8398 if (!ev || !vdev_ids) { 8399 ath11k_warn(ab, "failed to fetch pdev csa switch count ev"); 8400 kfree(tb); 8401 return; 8402 } 8403 8404 ath11k_dbg(ab, ATH11K_DBG_WMI, 8405 "event pdev csa switch count %d for pdev %d, num_vdevs %d", 8406 ev->current_switch_count, ev->pdev_id, 8407 ev->num_vdevs); 8408 8409 ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); 8410 8411 kfree(tb); 8412 } 8413 8414 static void 8415 ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb) 8416 { 8417 const void **tb; 8418 const struct 
wmi_pdev_radar_ev *ev;
	struct ath11k *ar;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	/* RCU read section protects the pdev_id -> ar lookup below */
	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);

	if (!ar) {
		ath11k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	/* dfs_block_radar_events suppresses reporting to mac80211 (test hook) */
	if (ar->dfs_block_radar_events)
		ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ar->hw, NULL);

exit:
	rcu_read_unlock();

	kfree(tb);
}

/* Handle WMI_PDEV_TEMPERATURE_EVENTID: forward the pdev temperature
 * reported by firmware to the thermal subsystem.
 */
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
				  struct sk_buff *skb)
{
	struct ath11k *ar;
	const void **tb;
	const struct wmi_pdev_temperature_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
		   ev->temp, ev->pdev_id);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
		goto exit;
	}

	ath11k_thermal_event_temperature(ar, ev->temp);

exit:
	rcu_read_unlock();

	kfree(tb);
}

/* Handle WMI_HOST_FILS_DISCOVERY_EVENTID: firmware expected a FILS
 * discovery frame from the host; log the scheduling details.
 */
static void ath11k_fils_discovery_event(struct ath11k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_fils_discovery_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab,
			    "failed to parse FILS discovery event tlv %d\n",
			    ret);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery");

	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch FILS discovery event\n");
		kfree(tb);
		return;
	}

	ath11k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    ev->vdev_id, ev->fils_tt, ev->tbtt);

	kfree(tb);
}

/* Handle WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: warn when firmware
 * reports a failed offloaded probe response transmission.
 */
static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_probe_resp_tx_status_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab,
			    "failed to parse probe response transmission status event tlv: %d\n",
			    ret);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status");

	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab,
			    "failed to fetch probe response transmission status event");
		kfree(tb);
		return;
	}

	/* non-zero tx_status means the offloaded probe resp was not sent */
	if (ev->tx_status)
		ath11k_warn(ab,
"Probe response transmission failed for vdev_id %u, status %u\n", 8570 ev->vdev_id, ev->tx_status); 8571 8572 kfree(tb); 8573 } 8574 8575 static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab, 8576 u16 tag, u16 len, 8577 const void *ptr, void *data) 8578 { 8579 struct wmi_wow_ev_arg *ev = data; 8580 const char *wow_pg_fault; 8581 int wow_pg_len; 8582 8583 switch (tag) { 8584 case WMI_TAG_WOW_EVENT_INFO: 8585 memcpy(ev, ptr, sizeof(*ev)); 8586 ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n", 8587 ev->wake_reason, wow_reason(ev->wake_reason)); 8588 break; 8589 8590 case WMI_TAG_ARRAY_BYTE: 8591 if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) { 8592 wow_pg_fault = ptr; 8593 /* the first 4 bytes are length */ 8594 wow_pg_len = *(int *)wow_pg_fault; 8595 wow_pg_fault += sizeof(int); 8596 ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n", 8597 wow_pg_len); 8598 ath11k_dbg_dump(ab, ATH11K_DBG_WMI, 8599 "wow_event_info_type packet present", 8600 "wow_pg_fault ", 8601 wow_pg_fault, 8602 wow_pg_len); 8603 } 8604 break; 8605 default: 8606 break; 8607 } 8608 8609 return 0; 8610 } 8611 8612 static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb) 8613 { 8614 struct wmi_wow_ev_arg ev = { }; 8615 int ret; 8616 8617 ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, 8618 ath11k_wmi_tlv_wow_wakeup_host_parse, 8619 &ev); 8620 if (ret) { 8621 ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret); 8622 return; 8623 } 8624 8625 ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host"); 8626 8627 complete(&ab->wow.wakeup_completed); 8628 } 8629 8630 static void 8631 ath11k_wmi_diag_event(struct ath11k_base *ab, 8632 struct sk_buff *skb) 8633 { 8634 ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag"); 8635 8636 trace_ath11k_wmi_diag(ab, skb->data, skb->len); 8637 } 8638 8639 static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status) 8640 { 8641 switch (status) { 8642 case WMI_ADD_TWT_STATUS_OK: 8643 return 
"ok";
	case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
		return "twt disabled";
	case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
		return "dialog id in use";
	case WMI_ADD_TWT_STATUS_INVALID_PARAM:
		return "invalid parameters";
	case WMI_ADD_TWT_STATUS_NOT_READY:
		return "not ready";
	case WMI_ADD_TWT_STATUS_NO_RESOURCE:
		return "resource unavailable";
	case WMI_ADD_TWT_STATUS_NO_ACK:
		return "no ack";
	case WMI_ADD_TWT_STATUS_NO_RESPONSE:
		return "no response";
	case WMI_ADD_TWT_STATUS_DENIED:
		return "denied";
	case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
		fallthrough;
	default:
		return "unknown error";
	}
}

/* Handle WMI_TWT_ADD_DIALOG_EVENTID: log failed TWT dialog setup. */
static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
					    struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_twt_add_dialog_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab,
			    "failed to parse wmi twt add dialog status event tlv: %d\n",
			    ret);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog");

	ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
		goto exit;
	}

	if (ev->status)
		ath11k_warn(ab,
			    "wmi add twt dialog event vdev %d dialog id %d status %s\n",
			    ev->vdev_id, ev->dialog_id,
			    ath11k_wmi_twt_add_dialog_event_status(ev->status));

exit:
	kfree(tb);
}

/* Handle WMI_GTK_OFFLOAD_STATUS_EVENTID: rebuild the 64-bit replay
 * counter from the firmware-refreshed GTK state and notify the
 * supplicant via ieee80211_gtk_rekey_notify().
 */
static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
						struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_gtk_offload_status_event *ev;
	struct ath11k_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	rcu_read_lock();

	arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
	if (!arvif) {
		ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    ev->vdev_id);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
		   ev->refresh_cnt);
	ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
			NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);

	/* combine the two 32-bit words into the 64-bit replay counter */
	replay_ctr = ev->replay_ctr.word1;
	replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
	arvif->rekey_data.replay_ctr = replay_ctr;

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);
exit:
	rcu_read_unlock();

	kfree(tb);
}

/* Handle WMI_P2P_NOA_EVENTID: validate the NoA descriptor count and push
 * the updated Notice-of-Absence attribute to the p2p layer.
 */
static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab,
				     struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_p2p_noa_event *ev;
	const struct ath11k_wmi_p2p_noa_info *noa;
	struct ath11k *ar;
	int vdev_id;
	u8 noa_descriptors;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb));
		return;
	}

	ev = tb[WMI_TAG_P2P_NOA_EVENT];
	noa = tb[WMI_TAG_P2P_NOA_INFO];

	if (!ev || !noa)
		goto out;

	vdev_id = ev->vdev_id;
	noa_descriptors = u32_get_bits(noa->noa_attr,
				       WMI_P2P_NOA_INFO_DESC_NUM);

	if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) {
		ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n",
			    noa_descriptors);
		goto out;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, noa_descriptors);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
			    vdev_id);
		goto unlock;
	}

	ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);

unlock:
	rcu_read_unlock();
out:
	kfree(tb);
}

/* Copy the fixed CFR peer-tx-event fields into the host-side
 * ath11k_cfr_peer_tx_param structure.
 */
static void ath11k_wmi_tlv_cfr_capture_event_fixed_param(const void *ptr,
							 void *data)
{
	struct ath11k_cfr_peer_tx_param *tx_params = data;
	const struct ath11k_wmi_cfr_peer_tx_event_param *params = ptr;

	tx_params->capture_method = params->capture_method;
	tx_params->vdev_id = params->vdev_id;
	ether_addr_copy(tx_params->peer_mac_addr, params->mac_addr.addr);
	tx_params->primary_20mhz_chan = params->chan_mhz;
	tx_params->bandwidth = params->bandwidth;
	tx_params->phy_mode = params->phy_mode;
	tx_params->band_center_freq1 = params->band_center_freq1;
	tx_params->band_center_freq2 = params->band_center_freq2;
	tx_params->spatial_streams = params->sts_count;
	tx_params->correlation_info_1 = params->correlation_info_1;
	tx_params->correlation_info_2 = params->correlation_info_2;
	tx_params->status = params->status;
	tx_params->timestamp_us = params->timestamp_us;
	tx_params->counter = params->counter;
	tx_params->rx_start_ts = params->rx_start_ts;

	memcpy(tx_params->chain_rssi, params->chain_rssi,
	       sizeof(tx_params->chain_rssi));

	/* CFO measurement is only meaningful when its valid bit is set */
	if (WMI_CFR_CFO_MEASUREMENT_VALID & params->cfo_measurement)
		tx_params->cfo_measurement = FIELD_GET(WMI_CFR_CFO_MEASUREMENT_RAW_DATA,
						       params->cfo_measurement);
}

/* Copy per-chain phase and AGC gain values from the CFR phase TLV. */
static void ath11k_wmi_tlv_cfr_capture_phase_fixed_param(const void *ptr,
							 void *data)
{
	struct ath11k_cfr_peer_tx_param *tx_params = data;
	const struct ath11k_wmi_cfr_peer_tx_event_phase_param *params = ptr;
	int i;

	for (i = 0; i < WMI_MAX_CHAINS; i++) {
		tx_params->chain_phase[i] = params->chain_phase[i];
		tx_params->agc_gain[i] = params->agc_gain[i];
	}
}

/* TLV iterator callback dispatching CFR capture event TLVs. */
static int ath11k_wmi_tlv_cfr_capture_evt_parse(struct ath11k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	switch (tag) {
	case WMI_TAG_PEER_CFR_CAPTURE_EVENT:
		ath11k_wmi_tlv_cfr_capture_event_fixed_param(ptr, data);
		break;
	case WMI_TAG_CFR_CAPTURE_PHASE_PARAM:
		ath11k_wmi_tlv_cfr_capture_phase_fixed_param(ptr, data);
		break;
	default:
		ath11k_warn(ab, "Invalid tag received tag %d len %d\n",
			    tag, len);
		return -EINVAL;
	}

	return 0;
}

/* Handle WMI_PEER_CFR_CAPTURE_EVENTID: parse the capture parameters and
 * hand them to the CFR processing path.
 */
static void ath11k_wmi_parse_cfr_capture_event(struct ath11k_base *ab,
					       struct sk_buff *skb)
{
	struct ath11k_cfr_peer_tx_param params = {};
	int ret;

	ath11k_dbg_dump(ab, ATH11K_DBG_CFR_DUMP, "cfr_dump:", "",
			skb->data, skb->len);

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_cfr_capture_evt_parse,
				  &params);
	if (ret) {
		ath11k_warn(ab, "failed to parse cfr capture event tlv %d\n",
			    ret);
		return;
	}

	ret = ath11k_process_cfr_capture_event(ab, &params);
	if (ret)
		ath11k_dbg(ab, ATH11K_DBG_CFR,
			   "failed to process cfr capture ret = %d\n", ret);
}

/* Top-level WMI event receive handler: extract the command id from the
 * WMI header and dispatch to the per-event handler. Owns the skb except
 * for WMI_MGMT_RX_EVENTID, whose handler takes over ownership.
 */
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	trace_ath11k_wmi_event(ab, id, skb->data, skb->len);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	switch (id) {
	/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath11k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath11k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath11k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID);
		break;
	case WMI_READY_EVENTID:
		ath11k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath11k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath11k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath11k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath11k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath11k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath11k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath11k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath11k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath11k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath11k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath11k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath11k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_UTF_EVENTID:
		ath11k_tm_wmi_event(ab, id, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath11k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath11k_wmi_twt_add_dialog_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath11k_reg_11d_new_cc_event(ab, skb);
		break;
	case WMI_DIAG_EVENTID:
		ath11k_wmi_diag_event(ab, skb);
		break;
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath11k_wmi_gtk_offload_status_event(ab, skb);
		break;
	case WMI_P2P_NOA_EVENTID:
		ath11k_wmi_p2p_noa_event(ab, skb);
		break;
	case WMI_PEER_CFR_CAPTURE_EVENTID:
		ath11k_wmi_parse_cfr_capture_event(ab, skb);
		break;
	default:
		ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}

/* Connect the WMI control HTC service endpoint for the given pdev index
 * and record the endpoint id and max message length.
 */
static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
					   u32 pdev_idx)
{
	int status;
	u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };

	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
	init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);

	return 0;
}

/* Build and send a WMI_UNIT_TEST_CMDID command carrying ut_cmd plus a
 * uint32 argument array of ut_cmd.num_args entries.
 */
static int
ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
			      struct wmi_unit_test_cmd ut_cmd,
			      u32 *test_args)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	arg_len = sizeof(u32) * ut_cmd.num_args;
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
			  FIELD_PREP(WMI_TLV_LEN,
sizeof(ut_cmd) - TLV_HDR_SIZE); 9097 9098 cmd->vdev_id = ut_cmd.vdev_id; 9099 cmd->module_id = ut_cmd.module_id; 9100 cmd->num_args = ut_cmd.num_args; 9101 cmd->diag_token = ut_cmd.diag_token; 9102 9103 ptr = skb->data + sizeof(ut_cmd); 9104 9105 tlv = ptr; 9106 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | 9107 FIELD_PREP(WMI_TLV_LEN, arg_len); 9108 9109 ptr += TLV_HDR_SIZE; 9110 9111 ut_cmd_args = ptr; 9112 for (i = 0; i < ut_cmd.num_args; i++) 9113 ut_cmd_args[i] = test_args[i]; 9114 9115 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 9116 9117 if (ret) { 9118 ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", 9119 ret); 9120 dev_kfree_skb(skb); 9121 } 9122 9123 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 9124 "cmd unit test module %d vdev %d n_args %d token %d\n", 9125 cmd->module_id, cmd->vdev_id, cmd->num_args, 9126 cmd->diag_token); 9127 9128 return ret; 9129 } 9130 9131 int ath11k_wmi_simulate_radar(struct ath11k *ar) 9132 { 9133 struct ath11k_vif *arvif; 9134 u32 dfs_args[DFS_MAX_TEST_ARGS]; 9135 struct wmi_unit_test_cmd wmi_ut; 9136 bool arvif_found = false; 9137 9138 list_for_each_entry(arvif, &ar->arvifs, list) { 9139 if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { 9140 arvif_found = true; 9141 break; 9142 } 9143 } 9144 9145 if (!arvif_found) 9146 return -EINVAL; 9147 9148 dfs_args[DFS_TEST_CMDID] = 0; 9149 dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 9150 /* Currently we could pass segment_id(b0 - b1), chirp(b2) 9151 * freq offset (b3 - b10) to unit test. For simulation 9152 * purpose this can be set to 0 which is valid. 
9153 */ 9154 dfs_args[DFS_TEST_RADAR_PARAM] = 0; 9155 9156 wmi_ut.vdev_id = arvif->vdev_id; 9157 wmi_ut.module_id = DFS_UNIT_TEST_MODULE; 9158 wmi_ut.num_args = DFS_MAX_TEST_ARGS; 9159 wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN; 9160 9161 ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n"); 9162 9163 return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 9164 } 9165 9166 int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, 9167 struct ath11k_fw_dbglog *dbglog) 9168 { 9169 struct ath11k_pdev_wmi *wmi = ar->wmi; 9170 struct wmi_debug_log_config_cmd_fixed_param *cmd; 9171 struct sk_buff *skb; 9172 struct wmi_tlv *tlv; 9173 int ret, len; 9174 9175 len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); 9176 skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 9177 if (!skb) 9178 return -ENOMEM; 9179 9180 cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data; 9181 cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) | 9182 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 9183 cmd->dbg_log_param = dbglog->param; 9184 9185 tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd)); 9186 tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | 9187 FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); 9188 9189 switch (dbglog->param) { 9190 case WMI_DEBUG_LOG_PARAM_LOG_LEVEL: 9191 case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE: 9192 case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE: 9193 case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP: 9194 cmd->value = dbglog->value; 9195 break; 9196 case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP: 9197 case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP: 9198 cmd->value = dbglog->value; 9199 memcpy(tlv->value, module_id_bitmap, 9200 MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); 9201 /* clear current config to be used for next user config */ 9202 memset(module_id_bitmap, 0, 9203 MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); 9204 break; 9205 default: 9206 dev_kfree_skb(skb); 9207 return 
-EINVAL; 9208 } 9209 9210 ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID); 9211 if (ret) { 9212 ath11k_warn(ar->ab, 9213 "failed to send WMI_DBGLOG_CFG_CMDID\n"); 9214 dev_kfree_skb(skb); 9215 } 9216 9217 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg"); 9218 9219 return ret; 9220 } 9221 9222 int ath11k_wmi_connect(struct ath11k_base *ab) 9223 { 9224 u32 i; 9225 u8 wmi_ep_count; 9226 9227 wmi_ep_count = ab->htc.wmi_ep_count; 9228 if (wmi_ep_count > ab->hw_params.max_radios) 9229 return -1; 9230 9231 for (i = 0; i < wmi_ep_count; i++) 9232 ath11k_connect_pdev_htc_service(ab, i); 9233 9234 return 0; 9235 } 9236 9237 static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id) 9238 { 9239 if (WARN_ON(pdev_id >= MAX_RADIOS)) 9240 return; 9241 9242 /* TODO: Deinit any pdev specific wmi resource */ 9243 } 9244 9245 int ath11k_wmi_pdev_attach(struct ath11k_base *ab, 9246 u8 pdev_id) 9247 { 9248 struct ath11k_pdev_wmi *wmi_handle; 9249 9250 if (pdev_id >= ab->hw_params.max_radios) 9251 return -EINVAL; 9252 9253 wmi_handle = &ab->wmi_ab.wmi[pdev_id]; 9254 9255 wmi_handle->wmi_ab = &ab->wmi_ab; 9256 9257 ab->wmi_ab.ab = ab; 9258 /* TODO: Init remaining resource specific to pdev */ 9259 9260 return 0; 9261 } 9262 9263 int ath11k_wmi_attach(struct ath11k_base *ab) 9264 { 9265 int ret; 9266 9267 ret = ath11k_wmi_pdev_attach(ab, 0); 9268 if (ret) 9269 return ret; 9270 9271 ab->wmi_ab.ab = ab; 9272 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; 9273 9274 /* It's overwritten when service_ext_ready is handled */ 9275 if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) 9276 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; 9277 9278 /* TODO: Init remaining wmi soc resources required */ 9279 init_completion(&ab->wmi_ab.service_ready); 9280 init_completion(&ab->wmi_ab.unified_ready); 9281 9282 return 0; 9283 } 9284 9285 void ath11k_wmi_detach(struct ath11k_base *ab) 9286 { 9287 int i; 9288 9289 /* TODO: Deinit wmi resource 
specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath11k_wmi_pdev_detach(ab, i);

	ath11k_wmi_free_dbring_caps(ab);
}

/* Send WMI_HW_DATA_FILTER_CMDID: enable the given filter bitmap, or set
 * all filter bits when disabling.
 */
int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
				  u32 filter_bitmap, bool enable)
{
	struct wmi_hw_data_filter_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);

	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->enable = enable;

	/* Set all modes in case of disable */
	if (cmd->enable)
		cmd->hw_filter_bitmap = filter_bitmap;
	else
		cmd->hw_filter_bitmap = ((u32)~0U);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "hw data filter enable %d filter_bitmap 0x%x\n",
		   enable, filter_bitmap);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
}

/* Send WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID to wake firmware from WoW. */
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
	struct wmi_wow_host_wakeup_ind *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n");

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
}

/* Send WMI_WOW_ENABLE_CMDID to enable Wake-on-WLAN before suspend. */
int ath11k_wmi_wow_enable(struct ath11k *ar)
{
	struct wmi_wow_enable_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_enable_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->enable = 1;
	cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n");

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}

/* Send WMI_SCAN_PROB_REQ_OUI_CMDID: program the OUI (first three MAC
 * octets) used for randomized probe requests.
 */
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
				 const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	struct wmi_scan_prob_req_oui_cmd *cmd;
	u32 prob_req_oui;
	int len;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->prob_req_oui = prob_req_oui;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n",
		   prob_req_oui);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}

/* Send WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID: add or remove a single
 * wake event from the vdev's event bitmap.
 */
int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct wmi_wow_add_del_event_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->is_add = enable;
	cmd->event_bitmap = (1 << event);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}

/* Send WMI_WOW_ADD_WAKE_PATTERN_CMDID: install a bitmap wake pattern.
 * The command carries one bitmap pattern TLV followed by empty ipv4/ipv6
 * sync, magic and timeout TLVs plus a ratelimit-interval TLV, all of
 * which the firmware interface requires even when unused.
 */
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	size_t len;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_ADD_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
	bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					WMI_TAG_WOW_BITMAP_PATTERN_T) |
			     FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);

	/* pattern/mask bytes are byte-swapped for the copy engine */
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
	bitmap->pattern_offset = pattern_offset;
	bitmap->pattern_len = pattern_len;
	bitmap->bitmask_len = pattern_len;
	bitmap->pattern_id = pattern_id;

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}

/* Send WMI_WOW_DEL_WAKE_PATTERN_CMDID: remove a bitmap wake pattern. */
int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_DEL_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}

/* Build a WMI_TAG_NLO_CONFIG_CMD skb that starts PNO (network list
 * offload) scanning with the SSIDs and channel list from @pno.
 */
static struct sk_buff *
ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
				   u32 vdev_id,
				   struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 *channel_list;
	size_t len, nlo_list_len, channel_list_len;
	u8 *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = pno->vdev_id;
	cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = pno->active_max_time;
	cmd->passive_dwell_time = pno->passive_max_time;

	if (pno->do_passive_scan)
		cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;

	cmd->fast_scan_period = pno->fast_scan_period;
	cmd->slow_scan_period = pno->slow_scan_period;
	cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
	cmd->delay_start_time = pno->delay_start_time;

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
			      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
		ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
		ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = pno->uc_networks_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = (struct nlo_configured_parameters *)ptr;
	for (i = 0; i < cmd->no_of_ssids; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			      FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));

		nlo_list[i].ssid.valid = true;
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       nlo_list[i].ssid.ssid.ssid_len);
		ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
				    roundup(nlo_list[i].ssid.ssid.ssid_len, 4));

		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = true;
			nlo_list[i].rssi_cond.rssi =
				pno->a_networks[i].rssi_threshold;
		}

		nlo_list[i].bcast_nw_type.valid = true;
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			pno->a_networks[i].bcast_nw_type;
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = pno->a_networks[0].channel_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = (u32 *)ptr;
	for (i = 0; i < cmd->num_of_channels; i++)
		channel_list[i] = pno->a_networks[0].channels[i];

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

/* Build a WMI_TAG_NLO_CONFIG_CMD skb that stops PNO scanning. */
static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->flags = WMI_NLO_CONFIG_STOP;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

/* Start or stop PNO scanning on @vdev_id depending on pno_scan->enable. */
int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}

static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
				       struct ath11k_arp_ns_offload *offload,
				       u8 **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_tuple *ns;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = (struct wmi_tlv *)buf_ptr;

	if (ext) {
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header =
			FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		/* base pass: always a fixed-size array of WMI_MAX_NS_OFFLOADS */
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = (struct wmi_ns_offload_tuple *)buf_ptr;
		ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
				 FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);

		if (enable) {
			/* tuples past ipv6_count stay as placeholders (no VALID flag) */
			if (i < ns_cnt)
				ns->flags |= WMI_NSOL_FLAGS_VALID;

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
			ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
			ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);

			if (offload->ipv6_type[i])
				ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
			/* NOTE(review): swap span is 8 bytes for a 6-byte MAC —
			 * assumes target_mac is an 8-byte container; confirm.
			 */
			ath11k_ce_byte_swap(ns->target_mac.addr, 8);

			/* mark the MAC valid only if it is non-zero (checked
			 * post-swap via the word0/word1 overlay)
			 */
			if (ns->target_mac.word0 != 0 ||
			    ns->target_mac.word1 != 0) {
				ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
			}

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}

/* Append the TLV array of ARP offload tuples at *@ptr and advance *@ptr.
 *
 * Always emits WMI_MAX_ARP_OFFLOADS tuples; only the first
 * @offload->ipv4_count of them are populated and flagged valid, and only
 * when @enable is true.
 */
static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
					struct ath11k_arp_ns_offload *offload,
					u8 **ptr,
					bool enable)
{
	struct wmi_arp_offload_tuple *arp;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = (struct wmi_arp_offload_tuple *)buf_ptr;
		arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = WMI_ARPOL_FLAGS_VALID;
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
			ath11k_ce_byte_swap(arp->target_ipaddr, 4);

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}

/* Program (or clear, when @enable is false) the vif's ARP/NS offload
 * configuration into firmware via WMI_SET_ARP_NS_OFFLOAD_CMDID.
 *
 * Command layout: fixed cmd, NS tuple array (WMI_MAX_NS_OFFLOADS entries),
 * ARP tuple array (WMI_MAX_ARP_OFFLOADS entries), and — only when more
 * than WMI_MAX_NS_OFFLOADS IPv6 addresses are configured — a trailing
 * extended NS tuple array for the overflow.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
			      struct ath11k_vif *arvif, bool enable)
{
	struct ath11k_arp_ns_offload *offload;
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	offload = &arvif->arp_ns_offload;
	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		/* room for the extended NS tuple array */
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
	}

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->flags = 0;
	cmd->vdev_id = arvif->vdev_id;
	cmd->num_ns_ext_tuples = ns_ext_tuples;

	buf_ptr += sizeof(*cmd);

	/* fixed order: NS tuples, ARP tuples, then extended NS tuples */
	ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
	ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}

/* Enable or disable GTK rekey offload for @arvif.
 *
 * When enabling, the KCK/KEK keys and the replay counter cached in
 * arvif->rekey_data are copied into the command (byte-swapped to CE order).
 * Returns 0 on success or a negative errno.
 */
int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
				 struct ath11k_vif *arvif, bool enable)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
	int len;
	struct sk_buff *skb;
	__le64 replay_ctr;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = arvif->vdev_id;

	if (enable) {
		cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		/* NOTE(review): the KCK swap length is GTK_OFFLOAD_KEK_BYTES,
		 * not a KCK-specific constant — confirm KCK and KEK lengths
		 * are equal in this firmware interface.
		 */
		ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
		ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);

		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
		ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
	} else {
		cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

/* Request the current GTK rekey offload status/counters from firmware
 * for @arvif (GTK_OFFLOAD_REQUEST_STATUS_OPCODE).  The reply arrives as a
 * WMI event; this only issues the query.  Returns 0 or a negative errno.
 */
int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
				 struct ath11k_vif *arvif)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	int len;
	struct sk_buff *skb;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = arvif->vdev_id;
	cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

/* Push the BIOS-provided SAR (specific absorption rate) power table to
 * firmware.  @sar_val must hold at least BIOS_SAR_TABLE_LEN bytes.
 *
 * Layout: fixed cmd, a byte-array TLV carrying the SAR table (length
 * rounded up to a u32 multiple), and a second byte-array TLV reserving
 * BIOS_SAR_RSVD1_LEN bytes whose payload is left as allocated.
 * Returns 0 or a negative errno.
 */
int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_sar_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	u32 len, sar_len_aligned, rsvd_len_aligned;

	sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
	rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sar_len_aligned +
	      TLV_HDR_SIZE + rsvd_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;
	cmd->sar_len = BIOS_SAR_TABLE_LEN;
	cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;

	/* SAR table payload TLV */
	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);

	/* reserved TLV: header only, payload bytes not written here */
	buf_ptr += sar_len_aligned;
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);

	return
	       ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
}

/* Push the BIOS geo-offset table command to firmware.  Only a reserved
 * byte-array TLV (BIOS_SAR_RSVD2_LEN bytes, rounded up to a u32 multiple)
 * follows the fixed cmd; its payload is left as allocated.
 * Returns 0 or a negative errno.
 */
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_geo_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	u32 len, rsvd_len_aligned;

	rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;
	cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;

	/* reserved TLV: header only, payload bytes not written here */
	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
}

/* Configure the firmware STA keepalive mechanism from @arg.
 *
 * The ARP-response tuple TLV is always appended after the fixed cmd
 * (firmware expects it regardless of method); its address/MAC fields are
 * only populated for the ARP-based keepalive methods.
 * Returns 0 or a negative errno.
 */
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->enabled = arg->enabled;
	cmd->interval = arg->interval;
	cmd->method = arg->method;

	/* ARP response tuple sits immediately after the fixed cmd */
	arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
	arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = arg->src_ip4_addr;
		arp->dest_ip4_addr = arg->dest_ip4_addr;
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}

/* True when firmware advertises the regulatory CC extended event service
 * and this radio supports 6 GHz — i.e. 6 GHz regulatory info can be used.
 */
bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
{
	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
}