// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
};

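/* Each WMI TLV begins with a 32-bit header that packs the tag and the
 * payload length (excluding the header itself) into separate bit fields.
 * ath12k_wmi_tlv_hdr() encodes an already-computed payload length, while
 * ath12k_wmi_tlv_cmd_hdr() takes the total size of a command structure
 * and subtracts TLV_HDR_SIZE on the caller's behalf.
 */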
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

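/* Default WMI resource configuration advertised to QCN9274 firmware in
 * the init command. Most of the limits scale with the number of radios
 * on the SoC; the TARGET_* values are driver-wide defaults.
 */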
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_tids = ath12k_core_get_max_num_tids(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates that the host supports peer map v3 and unmap v2 */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

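/* WCN7850 uses much smaller, mostly fixed resource limits (4 vdevs,
 * 16 peers) than the QCN9274 defaults above.
 */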
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

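/* Map each WMI hardware mode to its priority: PRIMAP(X) expands to
 * [X] = X_PRI, so the array below can be indexed directly by hw mode id
 * when comparing hardware modes.
 */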
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

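/* Walk a buffer of concatenated WMI TLVs, validating each header against
 * the remaining buffer length and against the per-tag minimum length in
 * ath12k_wmi_tlv_policies[], and hand each TLV payload to @iter.
 * Iteration stops early if @iter returns a non-zero value.
 */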
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

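/* Allocate a WMI_TAG_MAX-sized table and fill it with a pointer to the
 * payload of each TLV found in @skb, indexed by tag. If a tag occurs
 * more than once, only the last occurrence is kept. The caller owns the
 * returned table and must kfree() it.
 */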
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

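/* Send a WMI command, sleeping until HTC TX credits become available or
 * WMI_SEND_TIMEOUT_HZ expires. -EAGAIN from the nowait variant means
 * "out of credits, retry"; it is turned into -ESHUTDOWN when a crash
 * flush is in progress so callers stop retrying. -EAGAIN surviving the
 * wait therefore indicates a credit timeout.
 */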
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to a host-based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

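/* Locate the MAC/PHY capability entry for @hw_mode_id/@phy_id in the
 * flattened mac_phy_caps array (indexed by summing the PHY counts of
 * all preceding hw modes) and copy the per-band HT/VHT/HE capabilities
 * into the pdev. The 6 GHz band reuses the 5 GHz capability fields.
 */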
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
	 * from band to band for a single radio, it is not yet clear how this
	 * should be handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by firmware depends on the actual hw
	 * chains used. For example, for 4x4 capable macphys, the first 4
	 * chains can be used for the first mac and the remaining 4 chains
	 * for the second mac, or vice versa. In this case, tx/rx chainmask
	 * 0xf will be advertised for the first mac and 0xf0 for the second
	 * mac. Compute the shift value for the tx/rx chainmask, which is
	 * used to advertise the supported ht/vht rates to mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed due to NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * the WMI service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

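/* Allocate an skb for a WMI command. The requested length is rounded up
 * to a 4-byte multiple, WMI_SKB_HEADROOM is reserved for the headers
 * pushed later on the TX path, and the payload area is zeroed.
 */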
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

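/* Queue a management frame for transmission by firmware. Up to
 * WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are copied inline as a
 * byte-array TLV; the frame is additionally described by its DMA
 * address and total length, presumably so firmware can fetch anything
 * beyond the inline copy.
 */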
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*cmd));

	cmd->stats_id = cpu_to_le32(stats_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(skb);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* This can be optimized by sending the tx/rx chain configuration
	 * only for the supported bands instead of always sending it for
	 * both bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

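	/* For an MLO vdev (valid MLD address), append a one-element
	 * array TLV carrying the MLD MAC address for this link vdev.
	 */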
	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

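/* Fill a wmi_channel_params TLV from a vdev start request. For 320 MHz
 * and 160 MHz EHT modes, band_center_freq1 is programmed as the center
 * of the primary 160/80 MHz segment (shifted by +/-80 or +/-40 MHz from
 * the overall center frequency, which moves to band_center_freq2).
 */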
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(center_freq1);
	if (arg->mode == MODE_11BE_EHT320) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);

	} else if (arg->mode == MODE_11BE_EHT160) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);
	} else if (arg->mode == MODE_11BE_EHT80_80) {
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	} else {
		chan->band_center_freq2 = 0;
	}

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

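/* Build and send WMI_VDEV_START_REQUEST_CMDID (or the RESTART variant).
 * The command carries a channel TLV, an empty P2P NoA descriptor array,
 * and, for a fresh start of an MLO vdev, the ML flags plus one partner
 * link info TLV per partner link.
 */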
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(params->vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(params->aid);

	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

	if (params->tx_bssid) {
		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   params->vdev_id, params->aid, params->bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

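/* Create a peer in firmware. A wmi_peer_create_mlo_params TLV is always
 * appended; its MLO flag is set only when the peer is MLO capable
 * (arg->ml_enabled).
 */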
int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;
	struct wmi_peer_create_mlo_params *ml_param;
	void *ptr;
	struct wmi_tlv *tlv;

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

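/* Hand the DMA address of the host-allocated RX reorder queue for @tid
 * over to firmware, optionally updating the block ack window size.
 */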
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d\n", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

1557 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1558 			      u32 param_value, u8 pdev_id)
1559 {
1560 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1561 	struct wmi_pdev_set_param_cmd *cmd;
1562 	struct sk_buff *skb;
1563 	int ret;
1564 
1565 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1566 	if (!skb)
1567 		return -ENOMEM;
1568 
1569 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1570 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1571 						 sizeof(*cmd));
1572 	cmd->pdev_id = cpu_to_le32(pdev_id);
1573 	cmd->param_id = cpu_to_le32(param_id);
1574 	cmd->param_value = cpu_to_le32(param_value);
1575 
1576 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1577 		   "WMI pdev set param %d pdev id %d value %d\n",
1578 		   param_id, pdev_id, param_value);
1579 
1580 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1581 	if (ret) {
1582 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1583 		dev_kfree_skb(skb);
1584 	}
1585 
1586 	return ret;
1587 }
1588 
1589 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1590 {
1591 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1592 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1593 	struct sk_buff *skb;
1594 	int ret;
1595 
1596 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1597 	if (!skb)
1598 		return -ENOMEM;
1599 
1600 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1601 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1602 						 sizeof(*cmd));
1603 	cmd->vdev_id = cpu_to_le32(vdev_id);
1604 	cmd->sta_ps_mode = cpu_to_le32(enable);
1605 
1606 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1607 		   "WMI vdev set psmode %d vdev id %d\n",
1608 		   enable, vdev_id);
1609 
1610 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1611 	if (ret) {
1612 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1613 		dev_kfree_skb(skb);
1614 	}
1615 
1616 	return ret;
1617 }
1618 
1619 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1620 			    u32 pdev_id)
1621 {
1622 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1623 	struct wmi_pdev_suspend_cmd *cmd;
1624 	struct sk_buff *skb;
1625 	int ret;
1626 
1627 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1628 	if (!skb)
1629 		return -ENOMEM;
1630 
1631 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1632 
1633 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1634 						 sizeof(*cmd));
1635 
1636 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1637 	cmd->pdev_id = cpu_to_le32(pdev_id);
1638 
1639 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1640 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1641 
1642 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1643 	if (ret) {
1644 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1645 		dev_kfree_skb(skb);
1646 	}
1647 
1648 	return ret;
1649 }
1650 
1651 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1652 {
1653 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1654 	struct wmi_pdev_resume_cmd *cmd;
1655 	struct sk_buff *skb;
1656 	int ret;
1657 
1658 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1659 	if (!skb)
1660 		return -ENOMEM;
1661 
1662 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1663 
1664 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1665 						 sizeof(*cmd));
1666 	cmd->pdev_id = cpu_to_le32(pdev_id);
1667 
1668 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1669 		   "WMI pdev resume pdev id %d\n", pdev_id);
1670 
1671 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1672 	if (ret) {
1673 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1674 		dev_kfree_skb(skb);
1675 	}
1676 
1677 	return ret;
1678 }
1679 
1680 /* TODO: FW support for this command is not available yet.
1681  * It can be tested once the command and the corresponding
1682  * event are implemented in FW.
1683  */
1684 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1685 					  enum wmi_bss_chan_info_req_type type)
1686 {
1687 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1688 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1689 	struct sk_buff *skb;
1690 	int ret;
1691 
1692 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1693 	if (!skb)
1694 		return -ENOMEM;
1695 
1696 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1697 
1698 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1699 						 sizeof(*cmd));
1700 	cmd->req_type = cpu_to_le32(type);
1701 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1702 
1703 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1704 		   "WMI bss chan info req type %d\n", type);
1705 
1706 	ret = ath12k_wmi_cmd_send(wmi, skb,
1707 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1708 	if (ret) {
1709 		ath12k_warn(ar->ab,
1710 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1711 		dev_kfree_skb(skb);
1712 	}
1713 
1714 	return ret;
1715 }
1716 
1717 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1718 					struct ath12k_wmi_ap_ps_arg *arg)
1719 {
1720 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1721 	struct wmi_ap_ps_peer_cmd *cmd;
1722 	struct sk_buff *skb;
1723 	int ret;
1724 
1725 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1726 	if (!skb)
1727 		return -ENOMEM;
1728 
1729 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1730 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1731 						 sizeof(*cmd));
1732 
1733 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1734 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1735 	cmd->param = cpu_to_le32(arg->param);
1736 	cmd->value = cpu_to_le32(arg->value);
1737 
1738 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1739 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1740 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1741 
1742 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1743 	if (ret) {
1744 		ath12k_warn(ar->ab,
1745 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1746 		dev_kfree_skb(skb);
1747 	}
1748 
1749 	return ret;
1750 }
1751 
1752 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1753 				u32 param, u32 param_value)
1754 {
1755 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1756 	struct wmi_sta_powersave_param_cmd *cmd;
1757 	struct sk_buff *skb;
1758 	int ret;
1759 
1760 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1761 	if (!skb)
1762 		return -ENOMEM;
1763 
1764 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1765 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1766 						 sizeof(*cmd));
1767 
1768 	cmd->vdev_id = cpu_to_le32(vdev_id);
1769 	cmd->param = cpu_to_le32(param);
1770 	cmd->value = cpu_to_le32(param_value);
1771 
1772 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1773 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1774 		   vdev_id, param, param_value);
1775 
1776 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1777 	if (ret) {
1778 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1779 		dev_kfree_skb(skb);
1780 	}
1781 
1782 	return ret;
1783 }
1784 
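/* Deliberately trigger a firmware hang or assert of the given type after an
 * optional delay, typically driven from debugfs to exercise the firmware
 * recovery path.
 */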
1785 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1786 {
1787 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1788 	struct wmi_force_fw_hang_cmd *cmd;
1789 	struct sk_buff *skb;
1790 	int ret, len;
1791 
1792 	len = sizeof(*cmd);
1793 
1794 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1795 	if (!skb)
1796 		return -ENOMEM;
1797 
1798 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1799 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1800 						 len);
1801 
1802 	cmd->type = cpu_to_le32(type);
1803 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1804 
1805 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1806 
1807 	if (ret) {
1808 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1809 		dev_kfree_skb(skb);
1810 	}
1811 	return ret;
1812 }
1813 
1814 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1815 				  u32 param_id, u32 param_value)
1816 {
1817 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1818 	struct wmi_vdev_set_param_cmd *cmd;
1819 	struct sk_buff *skb;
1820 	int ret;
1821 
1822 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1823 	if (!skb)
1824 		return -ENOMEM;
1825 
1826 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1827 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1828 						 sizeof(*cmd));
1829 
1830 	cmd->vdev_id = cpu_to_le32(vdev_id);
1831 	cmd->param_id = cpu_to_le32(param_id);
1832 	cmd->param_value = cpu_to_le32(param_value);
1833 
1834 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1835 		   "WMI vdev id 0x%x set param %d value %d\n",
1836 		   vdev_id, param_id, param_value);
1837 
1838 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1839 	if (ret) {
1840 		ath12k_warn(ar->ab,
1841 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1842 		dev_kfree_skb(skb);
1843 	}
1844 
1845 	return ret;
1846 }
1847 
1848 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1849 {
1850 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1851 	struct wmi_get_pdev_temperature_cmd *cmd;
1852 	struct sk_buff *skb;
1853 	int ret;
1854 
1855 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1856 	if (!skb)
1857 		return -ENOMEM;
1858 
1859 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1860 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1861 						 sizeof(*cmd));
1862 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1863 
1864 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1865 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1866 
1867 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1868 	if (ret) {
1869 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1870 		dev_kfree_skb(skb);
1871 	}
1872 
1873 	return ret;
1874 }
1875 
1876 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1877 					    u32 vdev_id, u32 bcn_ctrl_op)
1878 {
1879 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1880 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1881 	struct sk_buff *skb;
1882 	int ret;
1883 
1884 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1885 	if (!skb)
1886 		return -ENOMEM;
1887 
1888 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1889 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1890 						 sizeof(*cmd));
1891 
1892 	cmd->vdev_id = cpu_to_le32(vdev_id);
1893 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1894 
1895 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1896 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1897 		   vdev_id, bcn_ctrl_op);
1898 
1899 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1900 	if (ret) {
1901 		ath12k_warn(ar->ab,
1902 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1903 		dev_kfree_skb(skb);
1904 	}
1905 
1906 	return ret;
1907 }
1908 
1909 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1910 			     const u8 *p2p_ie)
1911 {
1912 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1913 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1914 	size_t p2p_ie_len, aligned_len;
1915 	struct wmi_tlv *tlv;
1916 	struct sk_buff *skb;
1917 	void *ptr;
1918 	int ret, len;
1919 
1920 	p2p_ie_len = p2p_ie[1] + 2;
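	/* p2p_ie[1] is the element's length octet, so the full IE spans the
	 * two-byte header plus the payload; TLV byte arrays are padded up to
	 * a 4-byte boundary.
	 */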
1921 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1922 
1923 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1924 
1925 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1926 	if (!skb)
1927 		return -ENOMEM;
1928 
1929 	ptr = skb->data;
1930 	cmd = ptr;
1931 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1932 						 sizeof(*cmd));
1933 	cmd->vdev_id = cpu_to_le32(vdev_id);
1934 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1935 
1936 	ptr += sizeof(*cmd);
1937 	tlv = ptr;
1938 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
1939 					 aligned_len);
1940 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1941 
1942 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1943 	if (ret) {
1944 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1945 		dev_kfree_skb(skb);
1946 	}
1947 
1948 	return ret;
1949 }
1950 
1951 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1952 			struct ieee80211_mutable_offsets *offs,
1953 			struct sk_buff *bcn,
1954 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1955 {
1956 	struct ath12k *ar = arvif->ar;
1957 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1958 	struct ath12k_base *ab = ar->ab;
1959 	struct wmi_bcn_tmpl_cmd *cmd;
1960 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1961 	struct ath12k_vif *ahvif = arvif->ahvif;
1962 	struct ieee80211_bss_conf *conf;
1963 	u32 vdev_id = arvif->vdev_id;
1964 	struct wmi_tlv *tlv;
1965 	struct sk_buff *skb;
1966 	u32 ema_params = 0;
1967 	void *ptr;
1968 	int ret, len;
1969 	size_t aligned_len = roundup(bcn->len, 4);
1970 
1971 	conf = ath12k_mac_get_link_bss_conf(arvif);
1972 	if (!conf) {
1973 		ath12k_warn(ab,
1974 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1975 			    ahvif->vif->addr, arvif->link_id);
1976 		return -EINVAL;
1977 	}
1978 
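	/* Message layout: the fixed template params, then a beacon/probe
	 * info struct, then a byte-array TLV carrying the beacon frame
	 * itself (padded to 4 bytes).
	 */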
1979 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1980 
1981 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1982 	if (!skb)
1983 		return -ENOMEM;
1984 
1985 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1986 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1987 						 sizeof(*cmd));
1988 	cmd->vdev_id = cpu_to_le32(vdev_id);
1989 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1990 
1991 	if (conf->csa_active) {
1992 		cmd->csa_switch_count_offset =
1993 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
1994 		cmd->ext_csa_switch_count_offset =
1995 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
1996 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
1997 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
1998 	}
1999 
2000 	cmd->buf_len = cpu_to_le32(bcn->len);
2001 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
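	/* For EMA (enhanced multi-BSSID advertisement) each profile is sent
	 * as a separate template: ema_params packs the total template count
	 * and this template's index, with first/last flags marking the
	 * boundaries of the set.
	 */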
2002 	if (ema_args) {
2003 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
2004 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
2005 		if (ema_args->bcn_index == 0)
2006 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
2007 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
2008 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
2009 		cmd->ema_params = cpu_to_le32(ema_params);
2010 	}
2011 
2012 	ptr = skb->data + sizeof(*cmd);
2013 
2014 	bcn_prb_info = ptr;
2015 	len = sizeof(*bcn_prb_info);
2016 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
2017 							  len);
2018 	bcn_prb_info->caps = 0;
2019 	bcn_prb_info->erp = 0;
2020 
2021 	ptr += sizeof(*bcn_prb_info);
2022 
2023 	tlv = ptr;
2024 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2025 	memcpy(tlv->value, bcn->data, bcn->len);
2026 
2027 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2028 	if (ret) {
2029 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2030 		dev_kfree_skb(skb);
2031 	}
2032 
2033 	return ret;
2034 }
2035 
2036 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2037 				struct wmi_vdev_install_key_arg *arg)
2038 {
2039 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2040 	struct wmi_vdev_install_key_cmd *cmd;
2041 	struct wmi_tlv *tlv;
2042 	struct sk_buff *skb;
2043 	int ret, len, key_len_aligned;
2044 
2045 	/* The WMI_TAG_ARRAY_BYTE TLV must be aligned to 4 bytes; the actual
2046 	 * key length is specified in cmd->key_len.
2047 	 */
2048 	key_len_aligned = roundup(arg->key_len, 4);
2049 
2050 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2051 
2052 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2053 	if (!skb)
2054 		return -ENOMEM;
2055 
2056 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2057 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2058 						 sizeof(*cmd));
2059 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2060 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2061 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2062 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2063 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2064 	cmd->key_len = cpu_to_le32(arg->key_len);
2065 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2066 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2067 
2068 	if (arg->key_rsc_counter)
2069 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2070 
2071 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2072 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2073 	memcpy(tlv->value, arg->key_data, arg->key_len);
2074 
2075 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2076 		   "WMI vdev install key idx %d cipher %d len %d\n",
2077 		   arg->key_idx, arg->key_cipher, arg->key_len);
2078 
2079 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2080 	if (ret) {
2081 		ath12k_warn(ar->ab,
2082 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2083 		dev_kfree_skb(skb);
2084 	}
2085 
2086 	return ret;
2087 }
2088 
2089 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2090 				       struct ath12k_wmi_peer_assoc_arg *arg,
2091 				       bool hw_crypto_disabled)
2092 {
2093 	cmd->peer_flags = 0;
2094 	cmd->peer_flags_ext = 0;
2095 
2096 	if (arg->is_wme_set) {
2097 		if (arg->qos_flag)
2098 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2099 		if (arg->apsd_flag)
2100 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2101 		if (arg->ht_flag)
2102 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2103 		if (arg->bw_40)
2104 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2105 		if (arg->bw_80)
2106 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2107 		if (arg->bw_160)
2108 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2109 		if (arg->bw_320)
2110 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2111 
2112 		/* Typically if STBC is enabled for VHT it should be enabled
2113 	 * for HT as well.
2114 	 */
2115 		if (arg->stbc_flag)
2116 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2117 
2118 		/* Typically if LDPC is enabled for VHT it should be enabled
2119 	 * for HT as well.
2120 	 */
2121 		if (arg->ldpc_flag)
2122 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2123 
2124 		if (arg->static_mimops_flag)
2125 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2126 		if (arg->dynamic_mimops_flag)
2127 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2128 		if (arg->spatial_mux_flag)
2129 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2130 		if (arg->vht_flag)
2131 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2132 		if (arg->he_flag)
2133 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2134 		if (arg->twt_requester)
2135 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2136 		if (arg->twt_responder)
2137 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2138 		if (arg->eht_flag)
2139 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2140 	}
2141 
2142 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2143 	 * (during re-association).
2144 	 * Authorization will be done for these modes on key installation.
2145 	 */
2146 	if (arg->auth_flag)
2147 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2148 	if (arg->need_ptk_4_way) {
2149 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2150 		if (!hw_crypto_disabled)
2151 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2152 	}
2153 	if (arg->need_gtk_2_way)
2154 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2155 	/* safe mode bypasses the 4-way handshake */
2156 	if (arg->safe_mode_enabled)
2157 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2158 						 WMI_PEER_NEED_GTK_2_WAY));
2159 
2160 	if (arg->is_pmf_enabled)
2161 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2162 
2163 	/* Disable AMSDU for station transmit, if the user configures it.
2164 	 * Disable AMSDU for AP transmit to 11n stations, if the user
2165 	 * configures it.
2166 	 * if (arg->amsdu_disable) Add after FW support
2167 	 */
2168 
2169 	/* The target asserts if a node is marked HT while all MCS rates are
2170 	 * set to 0. Mark the node as non-HT if all the MCS rates are
2171 	 * disabled through iwpriv.
2172 	 */
2173 	if (arg->peer_ht_rates.num_rates == 0)
2174 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2175 }
2176 
2177 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2178 				   struct ath12k_wmi_peer_assoc_arg *arg)
2179 {
2180 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2181 	struct wmi_peer_assoc_complete_cmd *cmd;
2182 	struct ath12k_wmi_vht_rate_set_params *mcs;
2183 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2184 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2185 	struct wmi_peer_assoc_mlo_params *ml_params;
2186 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2187 	struct sk_buff *skb;
2188 	struct wmi_tlv *tlv;
2189 	void *ptr;
2190 	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
2191 	u32 peer_ht_rates_align, eml_trans_timeout;
2192 	int i, ret, len;
2193 	u16 eml_cap;
2194 	__le32 v;
2195 
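	/* The legacy and HT rate sets are byte-array TLVs whose lengths are
	 * rounded up to a 4-byte boundary; the actual rate counts travel in
	 * the fixed command struct.
	 */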
2196 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2197 					  sizeof(u32));
2198 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2199 				      sizeof(u32));
2200 
2201 	len = sizeof(*cmd) +
2202 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2203 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2204 	      sizeof(*mcs) + TLV_HDR_SIZE +
2205 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2206 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2207 
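	/* The ML params and partner-link arrays must be present even when
	 * MLO is disabled; in that case only their zero-length TLV headers
	 * are emitted, presumably so the firmware TLV parser still sees
	 * every expected tag.
	 */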
2208 	if (arg->ml.enabled)
2209 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2210 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2211 	else
2212 		len += (2 * TLV_HDR_SIZE);
2213 
2214 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2215 	if (!skb)
2216 		return -ENOMEM;
2217 
2218 	ptr = skb->data;
2219 
2220 	cmd = ptr;
2221 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2222 						 sizeof(*cmd));
2223 
2224 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2225 
2226 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2227 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2228 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2229 
2230 	ath12k_wmi_copy_peer_flags(cmd, arg,
2231 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2232 					    &ar->ab->dev_flags));
2233 
2234 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2235 
2236 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2237 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2238 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2239 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2240 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2241 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2242 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2243 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2244 
2245 	/* Update 11ax capabilities */
2246 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2247 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2248 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2249 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2250 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2251 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2252 		cmd->peer_he_cap_phy[i] =
2253 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2254 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2255 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2256 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2257 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2258 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2259 
2260 	/* Update 11be capabilities */
2261 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2262 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2263 		       0);
2264 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2265 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2266 		       0);
2267 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2268 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2269 
2270 	/* Update peer legacy rate information */
2271 	ptr += sizeof(*cmd);
2272 
2273 	tlv = ptr;
2274 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2275 
2276 	ptr += TLV_HDR_SIZE;
2277 
2278 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2279 	memcpy(ptr, arg->peer_legacy_rates.rates,
2280 	       arg->peer_legacy_rates.num_rates);
2281 
2282 	/* Update peer HT rate information */
2283 	ptr += peer_legacy_rates_align;
2284 
2285 	tlv = ptr;
2286 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2287 	ptr += TLV_HDR_SIZE;
2288 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2289 	memcpy(ptr, arg->peer_ht_rates.rates,
2290 	       arg->peer_ht_rates.num_rates);
2291 
2292 	/* VHT Rates */
2293 	ptr += peer_ht_rates_align;
2294 
2295 	mcs = ptr;
2296 
2297 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2298 						 sizeof(*mcs));
2299 
2300 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2301 
2302 	/* Update bandwidth-NSS mapping */
2303 	cmd->peer_bw_rxnss_override = 0;
2304 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2305 
2306 	if (arg->vht_capable) {
2307 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2308 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2309 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2310 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2311 	}
2312 
2313 	/* HE Rates */
2314 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2315 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2316 
2317 	ptr += sizeof(*mcs);
2318 
2319 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2320 
2321 	tlv = ptr;
2322 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2323 	ptr += TLV_HDR_SIZE;
2324 
2325 	/* Loop through the HE rate set */
2326 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2327 		he_mcs = ptr;
2328 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2329 							    sizeof(*he_mcs));
2330 
2331 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2332 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2333 		ptr += sizeof(*he_mcs);
2334 	}
2335 
2336 	tlv = ptr;
2337 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2338 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2339 	ptr += TLV_HDR_SIZE;
2340 	if (!len)
2341 		goto skip_ml_params;
2342 
2343 	ml_params = ptr;
2344 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2345 						       len);
2346 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2347 
2348 	if (arg->ml.assoc_link)
2349 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2350 
2351 	if (arg->ml.primary_umac)
2352 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2353 
2354 	if (arg->ml.logical_link_idx_valid)
2355 		ml_params->flags |=
2356 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2357 
2358 	if (arg->ml.peer_id_valid)
2359 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2360 
2361 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2362 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2363 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2364 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2365 
2366 	eml_cap = arg->ml.eml_cap;
2367 	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
2368 		/* Padding delay */
2369 		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
2370 		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
2371 		/* Transition delay */
2372 		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
2373 		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
2374 		/* Transition timeout */
2375 		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
2376 		ml_params->emlsr_trans_timeout_us =
2377 					cpu_to_le32(eml_trans_timeout);
2378 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
2379 			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
2380 			   eml_trans_timeout);
2381 	}
2382 
2383 	ptr += sizeof(*ml_params);
2384 
2385 skip_ml_params:
2386 	/* Loop through the EHT rate set */
2387 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2388 	tlv = ptr;
2389 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2390 	ptr += TLV_HDR_SIZE;
2391 
2392 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2393 		eht_mcs = ptr;
2394 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2395 							     sizeof(*eht_mcs));
2396 
2397 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2398 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2399 		ptr += sizeof(*eht_mcs);
2400 	}
2401 
2402 	/* Update MCS15 capability */
2403 	if (arg->eht_disable_mcs15)
2404 		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
2405 
2406 	tlv = ptr;
2407 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2408 	/* fill ML Partner links */
2409 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2410 	ptr += TLV_HDR_SIZE;
2411 
2412 	if (len == 0)
2413 		goto send;
2414 
2415 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2416 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2417 
2418 		partner_info = ptr;
2419 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2420 								  sizeof(*partner_info));
2421 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2422 		partner_info->hw_link_id =
2423 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2424 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2425 
2426 		if (arg->ml.partner_info[i].assoc_link)
2427 			partner_info->flags |=
2428 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2429 
2430 		if (arg->ml.partner_info[i].primary_umac)
2431 			partner_info->flags |=
2432 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2433 
2434 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2435 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2436 			partner_info->flags |= v;
2437 		}
2438 
2439 		partner_info->logical_link_idx =
2440 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2441 		ptr += sizeof(*partner_info);
2442 	}
2443 
2444 send:
2445 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2446 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
2447 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2448 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2449 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2450 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2451 		   cmd->peer_mpdu_density,
2452 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2453 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2454 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2455 		   cmd->peer_he_cap_phy[2],
2456 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2457 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2458 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2459 		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
2460 
2461 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2462 	if (ret) {
2463 		ath12k_warn(ar->ab,
2464 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2465 		dev_kfree_skb(skb);
2466 	}
2467 
2468 	return ret;
2469 }
2470 
2471 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2472 				struct ath12k_wmi_scan_req_arg *arg)
2473 {
2474 	/* setup commonly used values */
2475 	arg->scan_req_id = 1;
2476 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2477 	arg->dwell_time_active = 50;
2478 	arg->dwell_time_active_2g = 0;
2479 	arg->dwell_time_passive = 150;
2480 	arg->dwell_time_active_6g = 70;
2481 	arg->dwell_time_passive_6g = 70;
2482 	arg->min_rest_time = 50;
2483 	arg->max_rest_time = 500;
2484 	arg->repeat_probe_time = 0;
2485 	arg->probe_spacing_time = 0;
2486 	arg->idle_time = 0;
2487 	arg->max_scan_time = 20000;
2488 	arg->probe_delay = 5;
2489 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2490 				  WMI_SCAN_EVENT_COMPLETED |
2491 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2492 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2493 				  WMI_SCAN_EVENT_DEQUEUED;
2494 	arg->scan_f_chan_stat_evnt = 1;
2495 	arg->num_bssid = 1;
2496 
2497 	/* fill bssid_list[0] with 0xff, otherwise the BSSID and RA will be
2498 	 * all zeros in the probe request
2499 	 */
2500 	eth_broadcast_addr(arg->bssid_list[0].addr);
2501 }
2502 
2503 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2504 						   struct ath12k_wmi_scan_req_arg *arg)
2505 {
2506 	/* Scan events subscription */
2507 	if (arg->scan_ev_started)
2508 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2509 	if (arg->scan_ev_completed)
2510 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2511 	if (arg->scan_ev_bss_chan)
2512 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2513 	if (arg->scan_ev_foreign_chan)
2514 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2515 	if (arg->scan_ev_dequeued)
2516 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2517 	if (arg->scan_ev_preempted)
2518 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2519 	if (arg->scan_ev_start_failed)
2520 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2521 	if (arg->scan_ev_restarted)
2522 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2523 	if (arg->scan_ev_foreign_chn_exit)
2524 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2525 	if (arg->scan_ev_suspended)
2526 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2527 	if (arg->scan_ev_resumed)
2528 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2529 
2530 	/* Set scan control flags */
2531 	cmd->scan_ctrl_flags = 0;
2532 	if (arg->scan_f_passive)
2533 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2534 	if (arg->scan_f_strict_passive_pch)
2535 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2536 	if (arg->scan_f_promisc_mode)
2537 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2538 	if (arg->scan_f_capture_phy_err)
2539 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2540 	if (arg->scan_f_half_rate)
2541 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2542 	if (arg->scan_f_quarter_rate)
2543 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2544 	if (arg->scan_f_cck_rates)
2545 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2546 	if (arg->scan_f_ofdm_rates)
2547 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2548 	if (arg->scan_f_chan_stat_evnt)
2549 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2550 	if (arg->scan_f_filter_prb_req)
2551 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2552 	if (arg->scan_f_bcast_probe)
2553 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2554 	if (arg->scan_f_offchan_mgmt_tx)
2555 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2556 	if (arg->scan_f_offchan_data_tx)
2557 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2558 	if (arg->scan_f_force_active_dfs_chn)
2559 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2560 	if (arg->scan_f_add_tpc_ie_in_probe)
2561 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2562 	if (arg->scan_f_add_ds_ie_in_probe)
2563 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2564 	if (arg->scan_f_add_spoofed_mac_in_probe)
2565 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2566 	if (arg->scan_f_add_rand_seq_in_probe)
2567 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2568 	if (arg->scan_f_en_ie_whitelist_in_probe)
2569 		cmd->scan_ctrl_flags |=
2570 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2571 
2572 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2573 						 WMI_SCAN_DWELL_MODE_MASK);
2574 }
2575 
2576 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2577 				   struct ath12k_wmi_scan_req_arg *arg)
2578 {
2579 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2580 	struct wmi_start_scan_cmd *cmd;
2581 	struct ath12k_wmi_ssid_params *ssid = NULL;
2582 	struct ath12k_wmi_mac_addr_params *bssid;
2583 	struct sk_buff *skb;
2584 	struct wmi_tlv *tlv;
2585 	void *ptr;
2586 	int i, ret, len;
2587 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2588 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2589 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2590 
2591 	len = sizeof(*cmd);
2592 
2593 	len += TLV_HDR_SIZE;
2594 	if (arg->num_chan)
2595 		len += arg->num_chan * sizeof(u32);
2596 
2597 	len += TLV_HDR_SIZE;
2598 	if (arg->num_ssids)
2599 		len += arg->num_ssids * sizeof(*ssid);
2600 
2601 	len += TLV_HDR_SIZE;
2602 	if (arg->num_bssid)
2603 		len += sizeof(*bssid) * arg->num_bssid;
2604 
2605 	if (arg->num_hint_bssid)
2606 		len += TLV_HDR_SIZE +
2607 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2608 
2609 	if (arg->num_hint_s_ssid)
2610 		len += TLV_HDR_SIZE +
2611 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2612 
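	/* The extra IE TLV header is always emitted; if the IEs would push
	 * the message past the WMI size limit they are dropped (padded
	 * length forced to 0) rather than failing the scan.
	 */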
2613 	len += TLV_HDR_SIZE;
2614 	if (arg->extraie.len)
2615 		extraie_len_with_pad =
2616 			roundup(arg->extraie.len, sizeof(u32));
2617 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2618 		len += extraie_len_with_pad;
2619 	} else {
2620 		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2621 			    arg->extraie.len);
2622 		extraie_len_with_pad = 0;
2623 	}
2624 
2625 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2626 	if (!skb)
2627 		return -ENOMEM;
2628 
2629 	ptr = skb->data;
2630 
2631 	cmd = ptr;
2632 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2633 						 sizeof(*cmd));
2634 
2635 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2636 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2637 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2638 	if (ar->state_11d == ATH12K_11D_PREPARING)
2639 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2640 	else
2641 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2642 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2643 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2644 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2645 
2646 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2647 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2648 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2649 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2650 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2651 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2652 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2653 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2654 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2655 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2656 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2657 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2658 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2659 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2660 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2661 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2662 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2663 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2664 
2665 	ptr += sizeof(*cmd);
2666 
2667 	len = arg->num_chan * sizeof(u32);
2668 
2669 	tlv = ptr;
2670 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2671 	ptr += TLV_HDR_SIZE;
2672 	tmp_ptr = (u32 *)ptr;
2673 
2674 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
2675 
2676 	ptr += len;
2677 
2678 	len = arg->num_ssids * sizeof(*ssid);
2679 	tlv = ptr;
2680 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2681 
2682 	ptr += TLV_HDR_SIZE;
2683 
2684 	if (arg->num_ssids) {
2685 		ssid = ptr;
2686 		for (i = 0; i < arg->num_ssids; ++i) {
2687 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2688 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2689 			       arg->ssid[i].ssid_len);
2690 			ssid++;
2691 		}
2692 	}
2693 
2694 	ptr += (arg->num_ssids * sizeof(*ssid));
2695 	len = arg->num_bssid * sizeof(*bssid);
2696 	tlv = ptr;
2697 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2698 
2699 	ptr += TLV_HDR_SIZE;
2700 	bssid = ptr;
2701 
2702 	if (arg->num_bssid) {
2703 		for (i = 0; i < arg->num_bssid; ++i) {
2704 			ether_addr_copy(bssid->addr,
2705 					arg->bssid_list[i].addr);
2706 			bssid++;
2707 		}
2708 	}
2709 
2710 	ptr += arg->num_bssid * sizeof(*bssid);
2711 
2712 	len = extraie_len_with_pad;
2713 	tlv = ptr;
2714 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2715 	ptr += TLV_HDR_SIZE;
2716 
2717 	if (extraie_len_with_pad)
2718 		memcpy(ptr, arg->extraie.ptr,
2719 		       arg->extraie.len);
2720 
2721 	ptr += extraie_len_with_pad;
2722 
2723 	if (arg->num_hint_s_ssid) {
2724 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2725 		tlv = ptr;
2726 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2727 		ptr += TLV_HDR_SIZE;
2728 		s_ssid = ptr;
2729 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2730 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2731 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2732 			s_ssid++;
2733 		}
2734 		ptr += len;
2735 	}
2736 
2737 	if (arg->num_hint_bssid) {
2738 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2739 		tlv = ptr;
2740 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2741 		ptr += TLV_HDR_SIZE;
2742 		hint_bssid = ptr;
2743 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2744 			hint_bssid->freq_flags =
2745 				arg->hint_bssid[i].freq_flags;
2746 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2747 					&arg->hint_bssid[i].bssid.addr[0]);
2748 			hint_bssid++;
2749 		}
2750 	}
2751 
2752 	ret = ath12k_wmi_cmd_send(wmi, skb,
2753 				  WMI_START_SCAN_CMDID);
2754 	if (ret) {
2755 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2756 		dev_kfree_skb(skb);
2757 	}
2758 
2759 	return ret;
2760 }
2761 
2762 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2763 				  struct ath12k_wmi_scan_cancel_arg *arg)
2764 {
2765 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2766 	struct wmi_stop_scan_cmd *cmd;
2767 	struct sk_buff *skb;
2768 	int ret;
2769 
2770 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2771 	if (!skb)
2772 		return -ENOMEM;
2773 
2774 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2775 
2776 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2777 						 sizeof(*cmd));
2778 
2779 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2780 	cmd->requestor = cpu_to_le32(arg->requester);
2781 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2782 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2783 	/* stop the scan with the corresponding scan_id */
2784 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2785 		/* Cancelling all scans */
2786 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2787 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2788 		/* Cancelling VAP scans */
2789 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2790 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2791 		/* Cancelling specific scan */
2792 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2793 	} else {
2794 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2795 			    arg->req_type);
2796 		dev_kfree_skb(skb);
2797 		return -EINVAL;
2798 	}
2799 
2800 	ret = ath12k_wmi_cmd_send(wmi, skb,
2801 				  WMI_STOP_SCAN_CMDID);
2802 	if (ret) {
2803 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2804 		dev_kfree_skb(skb);
2805 	}
2806 
2807 	return ret;
2808 }
2809 
2810 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2811 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2812 {
2813 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2814 	struct wmi_scan_chan_list_cmd *cmd;
2815 	struct sk_buff *skb;
2816 	struct ath12k_wmi_channel_params *chan_info;
2817 	struct ath12k_wmi_channel_arg *channel_arg;
2818 	struct wmi_tlv *tlv;
2819 	void *ptr;
2820 	int i, ret, len;
2821 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2822 	__le32 *reg1, *reg2;
2823 
2824 	channel_arg = &arg->channel[0];
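	/* The channel list can exceed the maximum WMI message size, so send
	 * it in chunks; every chunk after the first sets the append flag so
	 * the firmware extends, rather than replaces, its channel list.
	 */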
2825 	while (arg->nallchans) {
2826 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2827 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2828 			sizeof(*chan_info);
2829 
2830 		num_send_chans = min(arg->nallchans, max_chan_limit);
2831 
2832 		arg->nallchans -= num_send_chans;
2833 		len += sizeof(*chan_info) * num_send_chans;
2834 
2835 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2836 		if (!skb)
2837 			return -ENOMEM;
2838 
2839 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2840 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2841 							 sizeof(*cmd));
2842 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2843 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2844 		if (num_sends)
2845 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2846 
2847 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2848 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2849 			   num_send_chans, len, cmd->pdev_id, num_sends);
2850 
2851 		ptr = skb->data + sizeof(*cmd);
2852 
2853 		len = sizeof(*chan_info) * num_send_chans;
2854 		tlv = ptr;
2855 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
2856 						 len);
2857 		ptr += TLV_HDR_SIZE;
2858 
2859 		for (i = 0; i < num_send_chans; ++i) {
2860 			chan_info = ptr;
2861 			memset(chan_info, 0, sizeof(*chan_info));
2862 			len = sizeof(*chan_info);
2863 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2864 								       len);
2865 
2866 			reg1 = &chan_info->reg_info_1;
2867 			reg2 = &chan_info->reg_info_2;
2868 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2869 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2870 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2871 
2872 			if (channel_arg->is_chan_passive)
2873 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2874 			if (channel_arg->allow_he)
2875 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2876 			else if (channel_arg->allow_vht)
2877 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2878 			else if (channel_arg->allow_ht)
2879 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2880 			if (channel_arg->half_rate)
2881 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2882 			if (channel_arg->quarter_rate)
2883 				chan_info->info |=
2884 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2885 
2886 			if (channel_arg->psc_channel)
2887 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2888 
2889 			if (channel_arg->dfs_set)
2890 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2891 
2892 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2893 							    WMI_CHAN_INFO_MODE);
2894 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2895 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2896 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2897 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2898 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2899 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2900 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2901 						  WMI_CHAN_REG_INFO1_REG_CLS);
2902 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2903 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2904 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2905 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2906 
2907 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2908 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2909 				   i, chan_info->mhz, chan_info->info);
2910 
2911 			ptr += sizeof(*chan_info);
2912 
2913 			channel_arg++;
2914 		}
2915 
2916 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2917 		if (ret) {
2918 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2919 			dev_kfree_skb(skb);
2920 			return ret;
2921 		}
2922 
2923 		num_sends++;
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2930 				   struct wmi_wmm_params_all_arg *param)
2931 {
2932 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2933 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2934 	struct wmi_wmm_params *wmm_param;
2935 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2936 	struct sk_buff *skb;
2937 	int ret, ac;
2938 
2939 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2940 	if (!skb)
2941 		return -ENOMEM;
2942 
2943 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2944 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2945 						 sizeof(*cmd));
2946 
2947 	cmd->vdev_id = cpu_to_le32(vdev_id);
2948 	cmd->wmm_param_type = 0;
2949 
2950 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2951 		switch (ac) {
2952 		case WME_AC_BE:
2953 			wmi_wmm_arg = &param->ac_be;
2954 			break;
2955 		case WME_AC_BK:
2956 			wmi_wmm_arg = &param->ac_bk;
2957 			break;
2958 		case WME_AC_VI:
2959 			wmi_wmm_arg = &param->ac_vi;
2960 			break;
2961 		case WME_AC_VO:
2962 			wmi_wmm_arg = &param->ac_vo;
2963 			break;
2964 		}
2965 
2966 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2967 		wmm_param->tlv_header =
2968 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2969 					       sizeof(*wmm_param));
2970 
2971 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2972 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2973 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2974 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2975 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2976 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2977 
2978 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2979 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2980 			   ac, wmm_param->aifs, wmm_param->cwmin,
2981 			   wmm_param->cwmax, wmm_param->txoplimit,
2982 			   wmm_param->acm, wmm_param->no_ack);
2983 	}
2984 	ret = ath12k_wmi_cmd_send(wmi, skb,
2985 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2986 	if (ret) {
2987 		ath12k_warn(ar->ab,
2988 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2989 		dev_kfree_skb(skb);
2990 	}
2991 
2992 	return ret;
2993 }
2994 
2995 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2996 						  u32 pdev_id)
2997 {
2998 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2999 	struct wmi_dfs_phyerr_offload_cmd *cmd;
3000 	struct sk_buff *skb;
3001 	int ret;
3002 
3003 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3004 	if (!skb)
3005 		return -ENOMEM;
3006 
3007 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
3008 	cmd->tlv_header =
3009 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
3010 				       sizeof(*cmd));
3011 
3012 	cmd->pdev_id = cpu_to_le32(pdev_id);
3013 
3014 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3015 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
3016 
3017 	ret = ath12k_wmi_cmd_send(wmi, skb,
3018 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
3019 	if (ret) {
3020 		ath12k_warn(ar->ab,
3021 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
3022 		dev_kfree_skb(skb);
3023 	}
3024 
3025 	return ret;
3026 }
3027 
3028 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
3029 			    const u8 *buf, size_t buf_len)
3030 {
3031 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3032 	struct wmi_pdev_set_bios_interface_cmd *cmd;
3033 	struct wmi_tlv *tlv;
3034 	struct sk_buff *skb;
3035 	u8 *ptr;
3036 	u32 len, len_aligned;
3037 	int ret;
3038 
3039 	len_aligned = roundup(buf_len, sizeof(u32));
3040 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
3041 
3042 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3043 	if (!skb)
3044 		return -ENOMEM;
3045 
3046 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3047 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3048 						 sizeof(*cmd));
3049 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3050 	cmd->param_type_id = cpu_to_le32(param_id);
3051 	cmd->length = cpu_to_le32(buf_len);
3052 
3053 	ptr = skb->data + sizeof(*cmd);
3054 	tlv = (struct wmi_tlv *)ptr;
3055 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3056 	ptr += TLV_HDR_SIZE;
3057 	memcpy(ptr, buf, buf_len);
3058 
3059 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3060 				  skb,
3061 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3062 	if (ret) {
3063 		ath12k_warn(ab,
3064 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3065 			    param_id, ret);
3066 		dev_kfree_skb(skb);
3067 	}
3068 
3069 	return ret;
3070 }
3071 
3072 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3073 {
3074 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3075 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3076 	struct wmi_tlv *tlv;
3077 	struct sk_buff *skb;
3078 	int ret;
3079 	u8 *buf_ptr;
3080 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3081 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3082 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3083 
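	/* The ACPI SAR data is passed through as two byte-array TLVs: the
	 * power limit table followed by the DBS backoff values, each padded
	 * to a 4-byte boundary.
	 */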
3084 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3085 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3086 					      sizeof(u32));
3087 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3088 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3089 
3090 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3091 	if (!skb)
3092 		return -ENOMEM;
3093 
3094 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3095 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3096 						 sizeof(*cmd));
3097 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3098 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3099 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3100 
3101 	buf_ptr = skb->data + sizeof(*cmd);
3102 	tlv = (struct wmi_tlv *)buf_ptr;
3103 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3104 					 sar_table_len_aligned);
3105 	buf_ptr += TLV_HDR_SIZE;
3106 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3107 
3108 	buf_ptr += sar_table_len_aligned;
3109 	tlv = (struct wmi_tlv *)buf_ptr;
3110 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3111 					 sar_dbs_backoff_len_aligned);
3112 	buf_ptr += TLV_HDR_SIZE;
3113 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3114 
3115 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3116 				  skb,
3117 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3118 	if (ret) {
3119 		ath12k_warn(ab,
3120 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3121 			    ret);
3122 		dev_kfree_skb(skb);
3123 	}
3124 
3125 	return ret;
3126 }
3127 
3128 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3129 {
3130 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3131 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3132 	struct wmi_tlv *tlv;
3133 	struct sk_buff *skb;
3134 	int ret;
3135 	u8 *buf_ptr;
3136 	u32 len, sar_geo_len_aligned;
3137 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3138 
3139 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3140 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3141 
3142 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3143 	if (!skb)
3144 		return -ENOMEM;
3145 
3146 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3147 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3148 						 sizeof(*cmd));
3149 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3150 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3151 
3152 	buf_ptr = skb->data + sizeof(*cmd);
3153 	tlv = (struct wmi_tlv *)buf_ptr;
3154 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3155 	buf_ptr += TLV_HDR_SIZE;
3156 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3157 
3158 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3159 				  skb,
3160 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3161 	if (ret) {
3162 		ath12k_warn(ab,
3163 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3164 			    ret);
3165 		dev_kfree_skb(skb);
3166 	}
3167 
3168 	return ret;
3169 }
3170 
3171 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3172 			  u32 tid, u32 initiator, u32 reason)
3173 {
3174 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3175 	struct wmi_delba_send_cmd *cmd;
3176 	struct sk_buff *skb;
3177 	int ret;
3178 
3179 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3180 	if (!skb)
3181 		return -ENOMEM;
3182 
3183 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3184 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3185 						 sizeof(*cmd));
3186 	cmd->vdev_id = cpu_to_le32(vdev_id);
3187 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3188 	cmd->tid = cpu_to_le32(tid);
3189 	cmd->initiator = cpu_to_le32(initiator);
3190 	cmd->reasoncode = cpu_to_le32(reason);
3191 
3192 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3193 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3194 		   vdev_id, mac, tid, initiator, reason);
3195 
3196 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3197 
3198 	if (ret) {
3199 		ath12k_warn(ar->ab,
3200 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3201 		dev_kfree_skb(skb);
3202 	}
3203 
3204 	return ret;
3205 }
3206 
3207 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3208 			      u32 tid, u32 status)
3209 {
3210 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3211 	struct wmi_addba_setresponse_cmd *cmd;
3212 	struct sk_buff *skb;
3213 	int ret;
3214 
3215 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3216 	if (!skb)
3217 		return -ENOMEM;
3218 
3219 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3220 	cmd->tlv_header =
3221 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3222 				       sizeof(*cmd));
3223 	cmd->vdev_id = cpu_to_le32(vdev_id);
3224 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3225 	cmd->tid = cpu_to_le32(tid);
3226 	cmd->statuscode = cpu_to_le32(status);
3227 
3228 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3229 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3230 		   vdev_id, mac, tid, status);
3231 
3232 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3233 
3234 	if (ret) {
3235 		ath12k_warn(ar->ab,
3236 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3237 		dev_kfree_skb(skb);
3238 	}
3239 
3240 	return ret;
3241 }
3242 
3243 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3244 			  u32 tid, u32 buf_size)
3245 {
3246 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3247 	struct wmi_addba_send_cmd *cmd;
3248 	struct sk_buff *skb;
3249 	int ret;
3250 
3251 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3252 	if (!skb)
3253 		return -ENOMEM;
3254 
3255 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3256 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3257 						 sizeof(*cmd));
3258 	cmd->vdev_id = cpu_to_le32(vdev_id);
3259 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3260 	cmd->tid = cpu_to_le32(tid);
3261 	cmd->buffersize = cpu_to_le32(buf_size);
3262 
3263 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3264 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3265 		   vdev_id, mac, tid, buf_size);
3266 
3267 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3268 
3269 	if (ret) {
3270 		ath12k_warn(ar->ab,
3271 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3272 		dev_kfree_skb(skb);
3273 	}
3274 
3275 	return ret;
3276 }
3277 
3278 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3279 {
3280 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3281 	struct wmi_addba_clear_resp_cmd *cmd;
3282 	struct sk_buff *skb;
3283 	int ret;
3284 
3285 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3286 	if (!skb)
3287 		return -ENOMEM;
3288 
3289 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3290 	cmd->tlv_header =
3291 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3292 				       sizeof(*cmd));
3293 	cmd->vdev_id = cpu_to_le32(vdev_id);
3294 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3295 
3296 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3297 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3298 		   vdev_id, mac);
3299 
3300 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3301 
3302 	if (ret) {
3303 		ath12k_warn(ar->ab,
3304 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3305 		dev_kfree_skb(skb);
3306 	}
3307 
3308 	return ret;
3309 }
3310 
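/* Program the initial regulatory country. Exactly one encoding must be
 * selected via arg->flags: an ISO alpha2 string, a numeric country code
 * or a regulatory domain id; anything else is rejected with -EINVAL.
 */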
3311 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3312 				     struct ath12k_wmi_init_country_arg *arg)
3313 {
3314 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3315 	struct wmi_init_country_cmd *cmd;
3316 	struct sk_buff *skb;
3317 	int ret;
3318 
3319 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3320 	if (!skb)
3321 		return -ENOMEM;
3322 
3323 	cmd = (struct wmi_init_country_cmd *)skb->data;
3324 	cmd->tlv_header =
3325 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3326 				       sizeof(*cmd));
3327 
3328 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3329 
3330 	switch (arg->flags) {
3331 	case ALPHA_IS_SET:
3332 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3333 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3334 		break;
3335 	case CC_IS_SET:
3336 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3337 		cmd->cc_info.country_code =
3338 			cpu_to_le32(arg->cc_info.country_code);
3339 		break;
3340 	case REGDMN_IS_SET:
3341 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3342 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3343 		break;
3344 	default:
3345 		ret = -EINVAL;
3346 		goto out;
3347 	}
3348 
3349 	ret = ath12k_wmi_cmd_send(wmi, skb,
3350 				  WMI_SET_INIT_COUNTRY_CMDID);
3351 
3352 out:
3353 	if (ret) {
3354 		ath12k_warn(ar->ab,
3355 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3356 			    ret);
3357 		dev_kfree_skb(skb);
3358 	}
3359 
3360 	return ret;
3361 }
3362 
3363 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3364 					    struct wmi_set_current_country_arg *arg)
3365 {
3366 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3367 	struct wmi_set_current_country_cmd *cmd;
3368 	struct sk_buff *skb;
3369 	int ret;
3370 
3371 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3372 	if (!skb)
3373 		return -ENOMEM;
3374 
3375 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3376 	cmd->tlv_header =
3377 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3378 				       sizeof(*cmd));
3379 
3380 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3381 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3382 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3383 
3384 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3385 		   "set current country pdev id %d alpha2 %c%c\n",
3386 		   ar->pdev->pdev_id,
3387 		   arg->alpha2[0],
3388 		   arg->alpha2[1]);
3389 
3390 	if (ret) {
3391 		ath12k_warn(ar->ab,
3392 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3393 		dev_kfree_skb(skb);
3394 	}
3395 
3396 	return ret;
3397 }
3398 
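/* Start an 802.11d scan in firmware so it can infer the operating
 * country from received beacons and probe responses. Period and start
 * interval are both in milliseconds, taken as-is from the caller.
 */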
3399 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3400 				       struct wmi_11d_scan_start_arg *arg)
3401 {
3402 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3403 	struct wmi_11d_scan_start_cmd *cmd;
3404 	struct sk_buff *skb;
3405 	int ret;
3406 
3407 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3408 	if (!skb)
3409 		return -ENOMEM;
3410 
3411 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3412 	cmd->tlv_header =
3413 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3414 				       sizeof(*cmd));
3415 
3416 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3417 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3418 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3419 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3420 
3421 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3422 		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
3423 		   arg->vdev_id, arg->scan_period_msec,
3424 		   arg->start_interval_msec);
3425 
3426 	if (ret) {
3427 		ath12k_warn(ar->ab,
3428 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3429 		dev_kfree_skb(skb);
3430 	}
3431 
3432 	return ret;
3433 }
3434 
3435 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
3436 {
3437 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3438 	struct wmi_11d_scan_stop_cmd *cmd;
3439 	struct sk_buff *skb;
3440 	int ret;
3441 
3442 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3443 	if (!skb)
3444 		return -ENOMEM;
3445 
3446 	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
3447 	cmd->tlv_header =
3448 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
3449 				       sizeof(*cmd));
3450 
3451 	cmd->vdev_id = cpu_to_le32(vdev_id);
3452 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
3453 
3454 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3455 		   "send 11d scan stop vdev id %d\n",
3456 		   vdev_id);
3457 
3458 	if (ret) {
3459 		ath12k_warn(ar->ab,
3460 			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3461 		dev_kfree_skb(skb);
3462 	}
3463 
3464 	return ret;
3465 }
3466 
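/* Enable target wake time (TWT) on a pdev. All tunables are filled from
 * the ATH12K_TWT_DEF_* compile-time defaults; nothing is taken from the
 * caller besides the pdev id.
 */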
3467 int
3468 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3469 {
3470 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3471 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3472 	struct wmi_twt_enable_params_cmd *cmd;
3473 	struct sk_buff *skb;
3474 	int ret, len;
3475 
3476 	len = sizeof(*cmd);
3477 
3478 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3479 	if (!skb)
3480 		return -ENOMEM;
3481 
3482 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3483 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3484 						 len);
3485 	cmd->pdev_id = cpu_to_le32(pdev_id);
3486 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3487 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3488 	cmd->congestion_thresh_setup =
3489 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3490 	cmd->congestion_thresh_teardown =
3491 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3492 	cmd->congestion_thresh_critical =
3493 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3494 	cmd->interference_thresh_teardown =
3495 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3496 	cmd->interference_thresh_setup =
3497 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3498 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3499 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3500 	cmd->no_of_bcast_mcast_slots =
3501 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3502 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3503 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3504 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3505 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3506 	cmd->remove_sta_slot_interval =
3507 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3508 	/* TODO add MBSSID support */
3509 	cmd->mbss_support = 0;
3510 
3511 	ret = ath12k_wmi_cmd_send(wmi, skb,
3512 				  WMI_TWT_ENABLE_CMDID);
3513 	if (ret) {
3514 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
3515 		dev_kfree_skb(skb);
3516 	}
3517 	return ret;
3518 }
3519 
3520 int
3521 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3522 {
3523 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3524 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3525 	struct wmi_twt_disable_params_cmd *cmd;
3526 	struct sk_buff *skb;
3527 	int ret, len;
3528 
3529 	len = sizeof(*cmd);
3530 
3531 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3532 	if (!skb)
3533 		return -ENOMEM;
3534 
3535 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3536 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3537 						 len);
3538 	cmd->pdev_id = cpu_to_le32(pdev_id);
3539 
3540 	ret = ath12k_wmi_cmd_send(wmi, skb,
3541 				  WMI_TWT_DISABLE_CMDID);
3542 	if (ret) {
3543 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
3544 		dev_kfree_skb(skb);
3545 	}
3546 	return ret;
3547 }
3548 
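/* Configure OBSS packet detect (spatial reuse) thresholds for a vdev.
 * The min/max offsets are signed values, hence a_cpu_to_sle32().
 */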
3549 int
3550 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3551 			     struct ieee80211_he_obss_pd *he_obss_pd)
3552 {
3553 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3554 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3555 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3556 	struct sk_buff *skb;
3557 	int ret, len;
3558 
3559 	len = sizeof(*cmd);
3560 
3561 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3562 	if (!skb)
3563 		return -ENOMEM;
3564 
3565 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3566 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3567 						 len);
3568 	cmd->vdev_id = cpu_to_le32(vdev_id);
3569 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3570 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3571 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3572 
3573 	ret = ath12k_wmi_cmd_send(wmi, skb,
3574 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3575 	if (ret) {
3576 		ath12k_warn(ab,
3577 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
3578 		dev_kfree_skb(skb);
3579 	}
3580 	return ret;
3581 }
3582 
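/* Enable or disable BSS color collision detection for a vdev. Only the
 * event type differs between the two cases; the remaining fields keep
 * the driver defaults.
 */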
3583 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3584 				  u8 bss_color, u32 period,
3585 				  bool enable)
3586 {
3587 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3588 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3589 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3590 	struct sk_buff *skb;
3591 	int ret, len;
3592 
3593 	len = sizeof(*cmd);
3594 
3595 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3596 	if (!skb)
3597 		return -ENOMEM;
3598 
3599 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3600 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3601 						 len);
3602 	cmd->vdev_id = cpu_to_le32(vdev_id);
3603 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3604 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3605 	cmd->current_bss_color = cpu_to_le32(bss_color);
3606 	cmd->detection_period_ms = cpu_to_le32(period);
3607 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3608 	cmd->free_slot_expiry_time_ms = 0;
3609 	cmd->flags = 0;
3610 
3611 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3612 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3613 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color,
3614 		   period, ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3615 
3616 	ret = ath12k_wmi_cmd_send(wmi, skb,
3617 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3618 	if (ret) {
3619 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
3620 		dev_kfree_skb(skb);
3621 	}
3622 	return ret;
3623 }
3624 
3625 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3626 						bool enable)
3627 {
3628 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3629 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3630 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3631 	struct sk_buff *skb;
3632 	int ret, len;
3633 
3634 	len = sizeof(*cmd);
3635 
3636 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3637 	if (!skb)
3638 		return -ENOMEM;
3639 
3640 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3641 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3642 						 len);
3643 	cmd->vdev_id = cpu_to_le32(vdev_id);
3644 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3645 
3646 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3647 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3648 		   vdev_id, enable);
3649 
3650 	ret = ath12k_wmi_cmd_send(wmi, skb,
3651 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3652 	if (ret) {
3653 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
3654 		dev_kfree_skb(skb);
3655 	}
3656 	return ret;
3657 }
3658 
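/* Hand the FILS discovery frame template to firmware: a fixed command
 * TLV followed by the template bytes in a padded WMI_TAG_ARRAY_BYTE TLV,
 * with buf_len carrying the unpadded template length.
 */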
3659 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3660 				   struct sk_buff *tmpl)
3661 {
3662 	struct wmi_tlv *tlv;
3663 	struct sk_buff *skb;
3664 	void *ptr;
3665 	int ret, len;
3666 	size_t aligned_len;
3667 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3668 
3669 	aligned_len = roundup(tmpl->len, 4);
3670 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3671 
3672 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3673 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3674 
3675 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3676 	if (!skb)
3677 		return -ENOMEM;
3678 
3679 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3680 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3681 						 sizeof(*cmd));
3682 	cmd->vdev_id = cpu_to_le32(vdev_id);
3683 	cmd->buf_len = cpu_to_le32(tmpl->len);
3684 	ptr = skb->data + sizeof(*cmd);
3685 
3686 	tlv = ptr;
3687 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3688 	memcpy(tlv->value, tmpl->data, tmpl->len);
3689 
3690 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3691 	if (ret) {
3692 		ath12k_warn(ar->ab,
3693 			    "WMI vdev %i failed to send FILS discovery template command\n",
3694 			    vdev_id);
3695 		dev_kfree_skb(skb);
3696 	}
3697 	return ret;
3698 }
3699 
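/* Hand the probe response template to firmware. Layout: fixed command
 * TLV, a WMI_TAG_BCN_PRB_INFO TLV (caps/erp left zero), then the
 * template bytes as a padded WMI_TAG_ARRAY_BYTE TLV.
 */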
3700 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3701 			       struct sk_buff *tmpl)
3702 {
3703 	struct wmi_probe_tmpl_cmd *cmd;
3704 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3705 	struct wmi_tlv *tlv;
3706 	struct sk_buff *skb;
3707 	void *ptr;
3708 	int ret, len;
3709 	size_t aligned_len = roundup(tmpl->len, 4);
3710 
3711 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3712 		   "WMI vdev %i set probe response template\n", vdev_id);
3713 
3714 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3715 
3716 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3717 	if (!skb)
3718 		return -ENOMEM;
3719 
3720 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3721 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3722 						 sizeof(*cmd));
3723 	cmd->vdev_id = cpu_to_le32(vdev_id);
3724 	cmd->buf_len = cpu_to_le32(tmpl->len);
3725 
3726 	ptr = skb->data + sizeof(*cmd);
3727 
3728 	probe_info = ptr;
3729 	len = sizeof(*probe_info);
3730 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3731 							len);
3732 	probe_info->caps = 0;
3733 	probe_info->erp = 0;
3734 
3735 	ptr += sizeof(*probe_info);
3736 
3737 	tlv = ptr;
3738 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3739 	memcpy(tlv->value, tmpl->data, tmpl->len);
3740 
3741 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3742 	if (ret) {
3743 		ath12k_warn(ar->ab,
3744 			    "WMI vdev %i failed to send probe response template command\n",
3745 			    vdev_id);
3746 		dev_kfree_skb(skb);
3747 	}
3748 	return ret;
3749 }
3750 
3751 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3752 			      bool unsol_bcast_probe_resp_enabled)
3753 {
3754 	struct sk_buff *skb;
3755 	int ret, len;
3756 	struct wmi_fils_discovery_cmd *cmd;
3757 
3758 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3759 		   "WMI vdev %i set %s interval to %u TU\n",
3760 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3761 		   "unsolicited broadcast probe response" : "FILS discovery",
3762 		   interval);
3763 
3764 	len = sizeof(*cmd);
3765 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3766 	if (!skb)
3767 		return -ENOMEM;
3768 
3769 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3770 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3771 						 len);
3772 	cmd->vdev_id = cpu_to_le32(vdev_id);
3773 	cmd->interval = cpu_to_le32(interval);
3774 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3775 
3776 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3777 	if (ret) {
3778 		ath12k_warn(ar->ab,
3779 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3780 			    vdev_id);
3781 		dev_kfree_skb(skb);
3782 	}
3783 	return ret;
3784 }
3785 
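/* Derive per-pdev band-to-mac frequency ranges from the HAL register
 * capabilities reported by firmware; a dual-band pdev spans from the
 * low 2 GHz channel to the high 5 GHz channel.
 */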
3786 static void
3787 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3788 			      struct ath12k_wmi_pdev_band_arg *arg)
3789 {
3790 	u8 i;
3791 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3792 	struct ath12k_pdev *pdev;
3793 
3794 	for (i = 0; i < soc->num_radios; i++) {
3795 		pdev = &soc->pdevs[i];
3796 		hal_reg_cap = &soc->hal_reg_cap[i];
3797 		arg[i].pdev_id = pdev->pdev_id;
3798 
3799 		switch (pdev->cap.supported_bands) {
3800 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
3801 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3802 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3803 			break;
3804 		case WMI_HOST_WLAN_2GHZ_CAP:
3805 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3806 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3807 			break;
3808 		case WMI_HOST_WLAN_5GHZ_CAP:
3809 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3810 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3811 			break;
3812 		default:
3813 			break;
3814 		}
3815 	}
3816 }
3817 
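/* Convert the host resource configuration (native endian) into the
 * little-endian wire format of WMI_TAG_RESOURCE_CONFIG. Besides the 1:1
 * field copies, this folds a few host-decided flags into flag1, flags2
 * and host_service_flags.
 */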
3818 static void
3819 ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
3820 				struct ath12k_wmi_resource_config_params *wmi_cfg,
3821 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3822 {
3823 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3824 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3825 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3826 	wmi_cfg->num_offload_reorder_buffs =
3827 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3828 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3829 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3830 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3831 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3832 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3833 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3834 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3835 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3836 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3837 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3838 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3839 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3840 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3841 	wmi_cfg->roam_offload_max_ap_profiles =
3842 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3843 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3844 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3845 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3846 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3847 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3848 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3849 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3850 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3851 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3852 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3853 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3854 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3855 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3856 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3857 	wmi_cfg->num_tdls_conn_table_entries =
3858 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3859 	wmi_cfg->beacon_tx_offload_max_vdev =
3860 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3861 	wmi_cfg->num_multicast_filter_entries =
3862 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3863 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3864 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3865 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3866 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3867 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3868 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3869 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3870 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3871 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3872 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3873 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3874 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3875 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3876 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3877 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3878 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
3879 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3880 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3881 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3882 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3883 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3884 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3885 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3886 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3887 	if (ab->hw_params->reoq_lut_support)
3888 		wmi_cfg->host_service_flags |=
3889 			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
3890 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3891 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3892 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3893 }
3894 
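/* Build and send WMI_INIT_CMDID. The message is: fixed wmi_init_cmd TLV,
 * resource config TLV, an array-of-struct TLV with the host memory
 * chunks (header present even when empty) and, unless the hw mode is
 * WMI_HOST_HW_MODE_MAX, a hw mode TLV plus a band-to-mac array.
 */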
3895 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3896 				struct ath12k_wmi_init_cmd_arg *arg)
3897 {
3898 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3899 	struct sk_buff *skb;
3900 	struct wmi_init_cmd *cmd;
3901 	struct ath12k_wmi_resource_config_params *cfg;
3902 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3903 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3904 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3905 	struct wmi_tlv *tlv;
3906 	int ret, len;
3907 	void *ptr;
3908 	u32 hw_mode_len = 0;
3909 	u16 idx;
3910 
3911 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3912 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3913 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3914 
3915 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3916 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3917 
3918 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3919 	if (!skb)
3920 		return -ENOMEM;
3921 
3922 	cmd = (struct wmi_init_cmd *)skb->data;
3923 
3924 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3925 						 sizeof(*cmd));
3926 
3927 	ptr = skb->data + sizeof(*cmd);
3928 	cfg = ptr;
3929 
3930 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
3931 
3932 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3933 						 sizeof(*cfg));
3934 
3935 	ptr += sizeof(*cfg);
3936 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3937 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3938 
3939 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3940 		host_mem_chunks[idx].tlv_header =
3941 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3942 					   len);
3943 
3944 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3945 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3946 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3947 
3948 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3949 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3950 			   arg->mem_chunks[idx].req_id,
3951 			   (u64)arg->mem_chunks[idx].paddr,
3952 			   arg->mem_chunks[idx].len);
3953 	}
3954 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3955 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3956 
3957 	/* Host memory chunks TLV header; len is zero when there are no chunks */
3958 	tlv = ptr;
3959 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3960 	ptr += TLV_HDR_SIZE + len;
3961 
3962 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3963 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3964 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3965 							     sizeof(*hw_mode));
3966 
3967 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3968 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3969 
3970 		ptr += sizeof(*hw_mode);
3971 
3972 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3973 		tlv = ptr;
3974 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3975 
3976 		ptr += TLV_HDR_SIZE;
3977 		len = sizeof(*band_to_mac);
3978 
3979 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3980 			band_to_mac = (void *)ptr;
3981 
3982 			band_to_mac->tlv_header =
3983 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3984 						       len);
3985 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3986 			band_to_mac->start_freq =
3987 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3988 			band_to_mac->end_freq =
3989 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3990 			ptr += sizeof(*band_to_mac);
3991 		}
3992 	}
3993 
3994 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3995 	if (ret) {
3996 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3997 		dev_kfree_skb(skb);
3998 	}
3999 
4000 	return ret;
4001 }
4002 
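/* Configure LRO for a pdev. th_4/th_6 are randomized per call and are
 * presumably the IPv4/IPv6 flow hash seeds consumed by the target.
 */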
4003 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
4004 			    int pdev_id)
4005 {
4006 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
4007 	struct sk_buff *skb;
4008 	int ret;
4009 
4010 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4011 	if (!skb)
4012 		return -ENOMEM;
4013 
4014 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
4015 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
4016 						 sizeof(*cmd));
4017 
4018 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
4019 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
4020 
4021 	cmd->pdev_id = cpu_to_le32(pdev_id);
4022 
4023 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4024 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4025 
4026 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4027 	if (ret) {
4028 		ath12k_warn(ar->ab,
4029 			    "failed to send lro cfg req wmi cmd\n");
4030 		goto err;
4031 	}
4032 
4033 	return 0;
4034 err:
4035 	dev_kfree_skb(skb);
4036 	return ret;
4037 }
4038 
4039 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4040 {
4041 	unsigned long time_left;
4042 
4043 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4044 						WMI_SERVICE_READY_TIMEOUT_HZ);
4045 	if (!time_left)
4046 		return -ETIMEDOUT;
4047 
4048 	return 0;
4049 }
4050 
4051 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4052 {
4053 	unsigned long time_left;
4054 
4055 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4056 						WMI_SERVICE_READY_TIMEOUT_HZ);
4057 	if (!time_left)
4058 		return -ETIMEDOUT;
4059 
4060 	return 0;
4061 }
4062 
4063 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4064 			   enum wmi_host_hw_mode_config_type mode)
4065 {
4066 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4067 	struct sk_buff *skb;
4068 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4069 	int len;
4070 	int ret;
4071 
4072 	len = sizeof(*cmd);
4073 
4074 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4075 	if (!skb)
4076 		return -ENOMEM;
4077 
4078 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4079 
4080 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4081 						 sizeof(*cmd));
4082 
4083 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
4084 	cmd->hw_mode_index = cpu_to_le32(mode);
4085 
4086 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4087 	if (ret) {
4088 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4089 		dev_kfree_skb(skb);
4090 	}
4091 
4092 	return ret;
4093 }
4094 
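/* Top-level WMI init: let the hw_params hook fill the resource config,
 * attach the pre-allocated host memory chunks and the preferred hw mode
 * (forced to WMI_HOST_HW_MODE_MAX for single-pdev targets, which makes
 * ath12k_init_cmd_send skip the hw mode TLV), then send WMI_INIT_CMDID.
 */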
4095 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
4096 {
4097 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4098 	struct ath12k_wmi_init_cmd_arg arg = {};
4099 
4100 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
4101 		     ab->wmi_ab.svc_map))
4102 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
4103 
4104 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
4105 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
4106 
4107 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
4108 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
4109 	arg.mem_chunks = wmi_ab->mem_chunks;
4110 
4111 	if (ab->hw_params->single_pdev_only)
4112 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4113 
4114 	arg.num_band_to_mac = ab->num_radios;
4115 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
4116 
4117 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
4118 
4119 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
4120 }
4121 
4122 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
4123 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
4124 {
4125 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
4126 	struct sk_buff *skb;
4127 	int ret;
4128 
4129 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4130 	if (!skb)
4131 		return -ENOMEM;
4132 
4133 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
4134 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
4135 						 sizeof(*cmd));
4136 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
4137 	cmd->scan_count = cpu_to_le32(arg->scan_count);
4138 	cmd->scan_period = cpu_to_le32(arg->scan_period);
4139 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
4140 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
4141 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
4142 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
4143 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
4144 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
4145 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
4146 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
4147 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
4148 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
4149 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
4150 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
4151 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
4152 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
4153 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
4154 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4155 
4156 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4157 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4158 		   arg->vdev_id);
4159 
4160 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4161 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4162 	if (ret) {
4163 		ath12k_warn(ar->ab,
4164 			    "failed to send spectral scan config wmi cmd\n");
4165 		goto err;
4166 	}
4167 
4168 	return 0;
4169 err:
4170 	dev_kfree_skb(skb);
4171 	return ret;
4172 }
4173 
4174 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4175 				    u32 trigger, u32 enable)
4176 {
4177 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4178 	struct sk_buff *skb;
4179 	int ret;
4180 
4181 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4182 	if (!skb)
4183 		return -ENOMEM;
4184 
4185 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4186 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4187 						 sizeof(*cmd));
4188 
4189 	cmd->vdev_id = cpu_to_le32(vdev_id);
4190 	cmd->trigger_cmd = cpu_to_le32(trigger);
4191 	cmd->enable_cmd = cpu_to_le32(enable);
4192 
4193 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4194 		   "WMI spectral enable cmd vdev id 0x%x\n",
4195 		   vdev_id);
4196 
4197 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4198 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4199 	if (ret) {
4200 		ath12k_warn(ar->ab,
4201 			    "failed to send spectral enable wmi cmd\n");
4202 		goto err;
4203 	}
4204 
4205 	return 0;
4206 err:
4207 	dev_kfree_skb(skb);
4208 	return ret;
4209 }
4210 
4211 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4212 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4213 {
4214 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4215 	struct sk_buff *skb;
4216 	int ret;
4217 
4218 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4219 	if (!skb)
4220 		return -ENOMEM;
4221 
4222 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4223 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4224 						 sizeof(*cmd));
4225 
4226 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4227 	cmd->module_id = cpu_to_le32(arg->module_id);
4228 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4229 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4230 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4231 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4232 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4233 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4234 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4235 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4236 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4237 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4238 
4239 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4240 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4241 		   arg->pdev_id);
4242 
4243 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4244 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4245 	if (ret) {
4246 		ath12k_warn(ar->ab,
4247 			    "failed to send dma ring cfg req wmi cmd\n");
4248 		goto err;
4249 	}
4250 
4251 	return 0;
4252 err:
4253 	dev_kfree_skb(skb);
4254 	return ret;
4255 }
4256 
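/* Parsers for the DMA ring buffer release event. The event carries a
 * fixed TLV followed by two WMI_TAG_ARRAY_STRUCT TLVs in a fixed order:
 * buffer entries first, then spectral meta data; the done flags in the
 * arg track which array is currently being walked.
 */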
4257 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4258 					  u16 tag, u16 len,
4259 					  const void *ptr, void *data)
4260 {
4261 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4262 
4263 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4264 		return -EPROTO;
4265 
4266 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4267 		return -ENOBUFS;
4268 
4269 	arg->num_buf_entry++;
4270 	return 0;
4271 }
4272 
4273 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4274 					 u16 tag, u16 len,
4275 					 const void *ptr, void *data)
4276 {
4277 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4278 
4279 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4280 		return -EPROTO;
4281 
4282 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4283 		return -ENOBUFS;
4284 
4285 	arg->num_meta++;
4286 
4287 	return 0;
4288 }
4289 
4290 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4291 				    u16 tag, u16 len,
4292 				    const void *ptr, void *data)
4293 {
4294 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4295 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4296 	u32 pdev_id;
4297 	int ret;
4298 
4299 	switch (tag) {
4300 	case WMI_TAG_DMA_BUF_RELEASE:
4301 		fixed = ptr;
4302 		arg->fixed = *fixed;
4303 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4304 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4305 		break;
4306 	case WMI_TAG_ARRAY_STRUCT:
4307 		if (!arg->buf_entry_done) {
4308 			arg->num_buf_entry = 0;
4309 			arg->buf_entry = ptr;
4310 
4311 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4312 						  ath12k_wmi_dma_buf_entry_parse,
4313 						  arg);
4314 			if (ret) {
4315 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4316 					    ret);
4317 				return ret;
4318 			}
4319 
4320 			arg->buf_entry_done = true;
4321 		} else if (!arg->meta_data_done) {
4322 			arg->num_meta = 0;
4323 			arg->meta_data = ptr;
4324 
4325 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4326 						  ath12k_wmi_dma_buf_meta_parse,
4327 						  arg);
4328 			if (ret) {
4329 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4330 					    ret);
4331 				return ret;
4332 			}
4333 
4334 			arg->meta_data_done = true;
4335 		}
4336 		break;
4337 	default:
4338 		break;
4339 	}
4340 	return 0;
4341 }
4342 
4343 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4344 						       struct sk_buff *skb)
4345 {
4346 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4347 	struct ath12k_dbring_buf_release_event param;
4348 	int ret;
4349 
4350 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4351 				  ath12k_wmi_dma_buf_parse,
4352 				  &arg);
4353 	if (ret) {
4354 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4355 		return;
4356 	}
4357 
4358 	param.fixed = arg.fixed;
4359 	param.buf_entry = arg.buf_entry;
4360 	param.num_buf_entry = arg.num_buf_entry;
4361 	param.meta_data = arg.meta_data;
4362 	param.num_meta = arg.num_meta;
4363 
4364 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4365 	if (ret) {
4366 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4367 		return;
4368 	}
4369 }
4370 
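/* Note: the TLV iterator hands back the payload, which starts at
 * hw_mode_id, hence the container_of() to recover the full struct.
 * tot_phy_id accumulates fls(phy_id_map), which assumes the map is a
 * contiguous low-bit mask.
 */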
4371 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4372 					 u16 tag, u16 len,
4373 					 const void *ptr, void *data)
4374 {
4375 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4376 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4377 	u32 phy_map = 0;
4378 
4379 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4380 		return -EPROTO;
4381 
4382 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4383 		return -ENOBUFS;
4384 
4385 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4386 				   hw_mode_id);
4387 	svc_rdy_ext->n_hw_mode_caps++;
4388 
4389 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4390 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4391 
4392 	return 0;
4393 }
4394 
4395 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4396 				   u16 len, const void *ptr, void *data)
4397 {
4398 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4399 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4400 	enum wmi_host_hw_mode_config_type mode, pref;
4401 	u32 i;
4402 	int ret;
4403 
4404 	svc_rdy_ext->n_hw_mode_caps = 0;
4405 	svc_rdy_ext->hw_mode_caps = ptr;
4406 
4407 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4408 				  ath12k_wmi_hw_mode_caps_parse,
4409 				  svc_rdy_ext);
4410 	if (ret) {
4411 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4412 		return ret;
4413 	}
4414 
4415 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4416 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4417 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4418 
4419 		if (mode >= WMI_HOST_HW_MODE_MAX)
4420 			continue;
4421 
4422 		pref = soc->wmi_ab.preferred_hw_mode;
4423 
4424 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4425 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4426 			soc->wmi_ab.preferred_hw_mode = mode;
4427 		}
4428 	}
4429 
4430 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
4431 		   soc->wmi_ab.preferred_hw_mode);
4432 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4433 		return -EINVAL;
4434 
4435 	return 0;
4436 }
4437 
4438 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4439 					 u16 tag, u16 len,
4440 					 const void *ptr, void *data)
4441 {
4442 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4443 
4444 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4445 		return -EPROTO;
4446 
4447 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4448 		return -ENOBUFS;
4449 
4450 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4451 	if (!svc_rdy_ext->n_mac_phy_caps) {
4452 		svc_rdy_ext->mac_phy_caps = kzalloc(svc_rdy_ext->tot_phy_id * len,
4453 						    GFP_ATOMIC);
4454 		if (!svc_rdy_ext->mac_phy_caps)
4455 			return -ENOMEM;
4456 	}
4457 
4458 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4459 	svc_rdy_ext->n_mac_phy_caps++;
4460 	return 0;
4461 }
4462 
4463 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4464 					     u16 tag, u16 len,
4465 					     const void *ptr, void *data)
4466 {
4467 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4468 
4469 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4470 		return -EPROTO;
4471 
4472 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4473 		return -ENOBUFS;
4474 
4475 	svc_rdy_ext->n_ext_hal_reg_caps++;
4476 	return 0;
4477 }
4478 
4479 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4480 				       u16 len, const void *ptr, void *data)
4481 {
4482 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4483 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4484 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4485 	int ret;
4486 	u32 i;
4487 
4488 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4489 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4490 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4491 				  ath12k_wmi_ext_hal_reg_caps_parse,
4492 				  svc_rdy_ext);
4493 	if (ret) {
4494 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4495 		return ret;
4496 	}
4497 
4498 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4499 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4500 						      svc_rdy_ext->soc_hal_reg_caps,
4501 						      svc_rdy_ext->ext_hal_reg_caps, i,
4502 						      &reg_cap);
4503 		if (ret) {
4504 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4505 			return ret;
4506 		}
4507 
4508 		if (reg_cap.phy_id >= MAX_RADIOS) {
4509 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4510 			return -EINVAL;
4511 		}
4512 
4513 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4514 	}
4515 	return 0;
4516 }
4517 
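/* Walk phy_id_map of the preferred hw mode bit by bit, pulling one
 * mac/phy capability per set bit into a pdev slot. single_pdev_only
 * targets collapse every radio into pdev 0.
 */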
4518 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4519 						 u16 len, const void *ptr,
4520 						 void *data)
4521 {
4522 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4523 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4524 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4525 	u32 phy_id_map;
4526 	int pdev_index = 0;
4527 	int ret;
4528 
4529 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4530 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4531 
4532 	soc->num_radios = 0;
4533 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4534 	soc->fw_pdev_count = 0;
4535 
4536 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4537 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4538 							    svc_rdy_ext,
4539 							    hw_mode_id, soc->num_radios,
4540 							    &soc->pdevs[pdev_index]);
4541 		if (ret) {
4542 			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
4543 				    soc->num_radios);
4544 			return ret;
4545 		}
4546 
4547 		soc->num_radios++;
4548 
4549 		/* For single_pdev_only targets,
4550 		 * save mac_phy capability in the same pdev
4551 		 */
4552 		if (soc->hw_params->single_pdev_only)
4553 			pdev_index = 0;
4554 		else
4555 			pdev_index = soc->num_radios;
4556 
4557 		/* TODO: mac_phy_cap prints */
4558 		phy_id_map >>= 1;
4559 	}
4560 
4561 	if (soc->hw_params->single_pdev_only) {
4562 		soc->num_radios = 1;
4563 		soc->pdevs[0].pdev_id = 0;
4564 	}
4565 
4566 	return 0;
4567 }
4568 
4569 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4570 					  u16 tag, u16 len,
4571 					  const void *ptr, void *data)
4572 {
4573 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4574 
4575 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4576 		return -EPROTO;
4577 
4578 	parse->n_dma_ring_caps++;
4579 	return 0;
4580 }
4581 
4582 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4583 					u32 num_cap)
4584 {
4585 	size_t sz;
4586 	void *ptr;
4587 
4588 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4589 	ptr = kzalloc(sz, GFP_ATOMIC);
4590 	if (!ptr)
4591 		return -ENOMEM;
4592 
4593 	ab->db_caps = ptr;
4594 	ab->num_db_cap = num_cap;
4595 
4596 	return 0;
4597 }
4598 
4599 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4600 {
4601 	kfree(ab->db_caps);
4602 	ab->db_caps = NULL;
4603 	ab->num_db_cap = 0;
4604 }
4605 
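/* Count the DMA ring (direct buffer) capability TLVs, then copy them
 * into ab->db_caps. The same array appears in both the ext and ext2
 * service ready events, so a second occurrence is ignored once
 * ab->num_db_cap is set.
 */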
4606 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4607 				    u16 len, const void *ptr, void *data)
4608 {
4609 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4610 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4611 	struct ath12k_dbring_cap *dir_buff_caps;
4612 	int ret;
4613 	u32 i;
4614 
4615 	dma_caps_parse->n_dma_ring_caps = 0;
4616 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4617 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4618 				  ath12k_wmi_dma_ring_caps_parse,
4619 				  dma_caps_parse);
4620 	if (ret) {
4621 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4622 		return ret;
4623 	}
4624 
4625 	if (!dma_caps_parse->n_dma_ring_caps)
4626 		return 0;
4627 
4628 	if (ab->num_db_cap) {
4629 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4630 		return 0;
4631 	}
4632 
4633 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4634 	if (ret)
4635 		return ret;
4636 
4637 	dir_buff_caps = ab->db_caps;
4638 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4639 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4640 			ath12k_warn(ab, "Invalid module id %d\n",
4641 				    le32_to_cpu(dma_caps[i].module_id));
4642 			ret = -EINVAL;
4643 			goto free_dir_buff;
4644 		}
4645 
4646 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4647 		dir_buff_caps[i].pdev_id =
4648 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4649 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4650 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4651 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4652 	}
4653 
4654 	return 0;
4655 
4656 free_dir_buff:
4657 	ath12k_wmi_free_dbring_caps(ab);
4658 	return ret;
4659 }
4660 
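/* WMI service ready ext event parser. The event contains several
 * anonymous WMI_TAG_ARRAY_STRUCT TLVs whose meaning is positional, so
 * the done flags consume them in the firmware-defined order: hw modes,
 * mac/phy caps, ext HAL reg caps, two chainmask arrays, OEM DMA ring
 * caps, then DMA ring caps.
 */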
4661 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4662 					u16 tag, u16 len,
4663 					const void *ptr, void *data)
4664 {
4665 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4666 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4667 	int ret;
4668 
4669 	switch (tag) {
4670 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4671 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4672 						&svc_rdy_ext->arg);
4673 		if (ret) {
4674 			ath12k_warn(ab, "unable to extract ext params\n");
4675 			return ret;
4676 		}
4677 		break;
4678 
4679 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4680 		svc_rdy_ext->hw_caps = ptr;
4681 		svc_rdy_ext->arg.num_hw_modes =
4682 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4683 		break;
4684 
4685 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4686 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4687 							    svc_rdy_ext);
4688 		if (ret)
4689 			return ret;
4690 		break;
4691 
4692 	case WMI_TAG_ARRAY_STRUCT:
4693 		if (!svc_rdy_ext->hw_mode_done) {
4694 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4695 			if (ret)
4696 				return ret;
4697 
4698 			svc_rdy_ext->hw_mode_done = true;
4699 		} else if (!svc_rdy_ext->mac_phy_done) {
4700 			svc_rdy_ext->n_mac_phy_caps = 0;
4701 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4702 						  ath12k_wmi_mac_phy_caps_parse,
4703 						  svc_rdy_ext);
4704 			if (ret) {
4705 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4706 				return ret;
4707 			}
4708 
4709 			svc_rdy_ext->mac_phy_done = true;
4710 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4711 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4712 			if (ret)
4713 				return ret;
4714 
4715 			svc_rdy_ext->ext_hal_reg_done = true;
4716 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4717 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4718 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4719 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4720 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4721 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4722 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4723 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4724 						       &svc_rdy_ext->dma_caps_parse);
4725 			if (ret)
4726 				return ret;
4727 
4728 			svc_rdy_ext->dma_ring_cap_done = true;
4729 		}
4730 		break;
4731 
4732 	default:
4733 		break;
4734 	}
4735 	return 0;
4736 }
4737 
4738 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4739 					  struct sk_buff *skb)
4740 {
4741 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4742 	int ret;
4743 
4744 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4745 				  ath12k_wmi_svc_rdy_ext_parse,
4746 				  &svc_rdy_ext);
4747 	if (ret) {
4748 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4749 		goto err;
4750 	}
4751 
4752 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4753 		complete(&ab->wmi_ab.service_ready);
4754 
4755 	kfree(svc_rdy_ext.mac_phy_caps);
4756 	return 0;
4757 
4758 err:
4759 	kfree(svc_rdy_ext.mac_phy_caps);
4760 	ath12k_wmi_free_dbring_caps(ab);
4761 	return ret;
4762 }
4763 
4764 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4765 				      const void *ptr,
4766 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4767 {
4768 	const struct wmi_service_ready_ext2_event *ev = ptr;
4769 
4770 	if (!ev)
4771 		return -EINVAL;
4772 
4773 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4774 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4775 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4776 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4777 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4778 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4779 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4780 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4781 	return 0;
4782 }
4783 
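/* Populate per-band EHT capabilities from the ext2 mac/phy caps. For
 * 6 GHz the previously stored 320 MHz support bit is saved and OR'ed
 * back, since this repopulation would otherwise clobber it.
 */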
4784 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4785 				      const __le32 cap_mac_info[],
4786 				      const __le32 cap_phy_info[],
4787 				      const __le32 supp_mcs[],
4788 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4789 				       __le32 cap_info_internal)
4790 {
4791 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4792 	u32 support_320mhz;
4793 	u8 i;
4794 
4795 	if (band == NL80211_BAND_6GHZ)
4796 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4797 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4798 
4799 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4800 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4801 
4802 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4803 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4804 
4805 	if (band == NL80211_BAND_6GHZ)
4806 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4807 
4808 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4809 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4810 	if (band != NL80211_BAND_2GHZ) {
4811 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4812 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4813 	}
4814 
4815 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4816 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4817 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4818 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4819 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4820 
4821 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4822 }
4823 
4824 static int
4825 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4826 				      const struct ath12k_wmi_caps_ext_params *caps,
4827 				      struct ath12k_pdev *pdev)
4828 {
4829 	struct ath12k_band_cap *cap_band;
4830 	u32 bands, support_320mhz;
4831 	int i;
4832 
4833 	if (ab->hw_params->single_pdev_only) {
4834 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4835 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4836 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4837 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4838 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4839 			return 0;
4840 		}
4841 
4842 		for (i = 0; i < ab->fw_pdev_count; i++) {
4843 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4844 
4845 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4846 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4847 				bands = fw_pdev->supported_bands;
4848 				break;
4849 			}
4850 		}
4851 
4852 		if (i == ab->fw_pdev_count)
4853 			return -EINVAL;
4854 	} else {
4855 		bands = pdev->cap.supported_bands;
4856 	}
4857 
4858 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
4859 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4860 					  caps->eht_cap_mac_info_2ghz,
4861 					  caps->eht_cap_phy_info_2ghz,
4862 					  caps->eht_supp_mcs_ext_2ghz,
4863 					  &caps->eht_ppet_2ghz,
4864 					  caps->eht_cap_info_internal);
4865 	}
4866 
4867 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
4868 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4869 					  caps->eht_cap_mac_info_5ghz,
4870 					  caps->eht_cap_phy_info_5ghz,
4871 					  caps->eht_supp_mcs_ext_5ghz,
4872 					  &caps->eht_ppet_5ghz,
4873 					  caps->eht_cap_info_internal);
4874 
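		/* The caps-ext TLV carries only 2 GHz and 5 GHz field
		 * groups; the 5 GHz group also covers 6 GHz, so it seeds
		 * both bands here.
		 */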
4875 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4876 					  caps->eht_cap_mac_info_5ghz,
4877 					  caps->eht_cap_phy_info_5ghz,
4878 					  caps->eht_supp_mcs_ext_5ghz,
4879 					  &caps->eht_ppet_5ghz,
4880 					  caps->eht_cap_info_internal);
4881 	}
4882 
4883 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4884 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4885 
4886 	return 0;
4887 }
4888 
4889 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4890 					   u16 len, const void *ptr,
4891 					   void *data)
4892 {
4893 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4894 	int i = 0, ret;
4895 
4896 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4897 		return -EPROTO;
4898 
4899 	if (ab->hw_params->single_pdev_only) {
4900 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4901 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4902 			return 0;
4903 	} else {
4904 		for (i = 0; i < ab->num_radios; i++) {
4905 			if (ab->pdevs[i].pdev_id ==
4906 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4907 				break;
4908 		}
4909 
4910 		if (i == ab->num_radios)
4911 			return -EINVAL;
4912 	}
4913 
4914 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4915 	if (ret) {
4916 		ath12k_warn(ab,
4917 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4918 			    ab->pdevs[i].pdev_id, ret);
4919 		return ret;
4920 	}
4921 
4922 	return 0;
4923 }
4924 
4925 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4926 					 u16 tag, u16 len,
4927 					 const void *ptr, void *data)
4928 {
4929 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4930 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4931 	int ret;
4932 
4933 	switch (tag) {
4934 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4935 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4936 						 &parse->arg);
4937 		if (ret) {
4938 			ath12k_warn(ab,
4939 				    "failed to extract wmi service ready ext2 parameters: %d\n",
4940 				    ret);
4941 			return ret;
4942 		}
4943 		break;
4944 
4945 	case WMI_TAG_ARRAY_STRUCT:
4946 		if (!parse->dma_ring_cap_done) {
4947 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4948 						       &parse->dma_caps_parse);
4949 			if (ret)
4950 				return ret;
4951 
4952 			parse->dma_ring_cap_done = true;
4953 		} else if (!parse->spectral_bin_scaling_done) {
4954 			/* TODO: place-holder only - the spectral bin
4955 			 * scaling TLV arrives before
4956 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
4957 			 */
4958 			parse->spectral_bin_scaling_done = true;
4959 		} else if (!parse->mac_phy_caps_ext_done) {
4960 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4961 						  ath12k_wmi_tlv_mac_phy_caps_ext,
4962 						  parse);
4963 			if (ret) {
4964 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4965 					    ret);
4966 				return ret;
4967 			}
4968 
4969 			parse->mac_phy_caps_ext_done = true;
4970 		}
4971 		break;
4972 	default:
4973 		break;
4974 	}
4975 
4976 	return 0;
4977 }
4978 
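/*
 * Ordering note: WMI_TAG_ARRAY_STRUCT carries no sub-type, so the parser
 * above tells the consecutive struct arrays apart purely by arrival order
 * via the *_done flags - DMA ring caps first, then spectral bin scaling
 * (currently a place-holder), then the extended MAC/PHY caps.
 */
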
4979 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4980 					   struct sk_buff *skb)
4981 {
4982 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4983 	int ret;
4984 
4985 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4986 				  ath12k_wmi_svc_rdy_ext2_parse,
4987 				  &svc_rdy_ext2);
4988 	if (ret) {
4989 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4990 		goto err;
4991 	}
4992 
4993 	complete(&ab->wmi_ab.service_ready);
4994 
4995 	return 0;
4996 
4997 err:
4998 	ath12k_wmi_free_dbring_caps(ab);
4999 	return ret;
5000 }
5001 
5002 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5003 					   struct wmi_vdev_start_resp_event *vdev_rsp)
5004 {
5005 	const void **tb;
5006 	const struct wmi_vdev_start_resp_event *ev;
5007 	int ret;
5008 
5009 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5010 	if (IS_ERR(tb)) {
5011 		ret = PTR_ERR(tb);
5012 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5013 		return ret;
5014 	}
5015 
5016 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
5017 	if (!ev) {
5018 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
5019 		kfree(tb);
5020 		return -EPROTO;
5021 	}
5022 
5023 	*vdev_rsp = *ev;
5024 
5025 	kfree(tb);
5026 	return 0;
5027 }
5028 
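/*
 * Pattern note (illustrative; the "foo" names are made up): the pull
 * helpers in the rest of this file share one shape.
 * ath12k_wmi_tlv_parse_alloc() returns a tag-indexed table in which
 * tb[tag] points into the skb at the payload of the first TLV carrying
 * that tag, or is NULL when the tag is absent:
 *
 *	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *
 *	ev = tb[WMI_TAG_FOO_EVENT];
 *	if (!ev) {
 *		kfree(tb);
 *		return -EPROTO;
 *	}
 *
 * The table only borrows pointers into the skb, so fields must be copied
 * out (with le32_to_cpu() where needed) before kfree(tb).
 */
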
5029 static struct ath12k_reg_rule
5030 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5031 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5032 {
5033 	struct ath12k_reg_rule *reg_rule_ptr;
5034 	u32 count;
5035 
5036 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
5037 			       GFP_ATOMIC);
5038 
5039 	if (!reg_rule_ptr)
5040 		return NULL;
5041 
5042 	for (count = 0; count < num_reg_rules; count++) {
5043 		reg_rule_ptr[count].start_freq =
5044 			le32_get_bits(wmi_reg_rule[count].freq_info,
5045 				      REG_RULE_START_FREQ);
5046 		reg_rule_ptr[count].end_freq =
5047 			le32_get_bits(wmi_reg_rule[count].freq_info,
5048 				      REG_RULE_END_FREQ);
5049 		reg_rule_ptr[count].max_bw =
5050 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5051 				      REG_RULE_MAX_BW);
5052 		reg_rule_ptr[count].reg_power =
5053 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5054 				      REG_RULE_REG_PWR);
5055 		reg_rule_ptr[count].ant_gain =
5056 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5057 				      REG_RULE_ANT_GAIN);
5058 		reg_rule_ptr[count].flags =
5059 			le32_get_bits(wmi_reg_rule[count].flag_info,
5060 				      REG_RULE_FLAGS);
5061 		reg_rule_ptr[count].psd_flag =
5062 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5063 				      REG_RULE_PSD_INFO);
5064 		reg_rule_ptr[count].psd_eirp =
5065 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5066 				      REG_RULE_PSD_EIRP);
5067 	}
5068 
5069 	return reg_rule_ptr;
5070 }
5071 
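/*
 * Bitfield round-trip assumed above (illustrative; the REG_RULE_* masks are
 * defined in wmi.h). A rule spanning 5170-5250 MHz would pack and unpack as:
 *
 *	__le32 freq_info = le32_encode_bits(5170, REG_RULE_START_FREQ) |
 *			   le32_encode_bits(5250, REG_RULE_END_FREQ);
 *	u32 start_freq = le32_get_bits(freq_info, REG_RULE_START_FREQ);
 *	u32 end_freq = le32_get_bits(freq_info, REG_RULE_END_FREQ);
 */
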
5072 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5073 					    u32 num_reg_rules)
5074 {
5075 	u8 num_invalid_5ghz_rules = 0;
5076 	u32 count, start_freq;
5077 
5078 	for (count = 0; count < num_reg_rules; count++) {
5079 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5080 
5081 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5082 			num_invalid_5ghz_rules++;
5083 	}
5084 
5085 	return num_invalid_5ghz_rules;
5086 }
5087 
5088 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5089 						   struct sk_buff *skb,
5090 						   struct ath12k_reg_info *reg_info)
5091 {
5092 	const void **tb;
5093 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5094 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5095 	u32 num_2g_reg_rules, num_5g_reg_rules;
5096 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5097 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5098 	u8 num_invalid_5ghz_ext_rules;
5099 	u32 total_reg_rules = 0;
5100 	int ret, i, j;
5101 
5102 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5103 
5104 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5105 	if (IS_ERR(tb)) {
5106 		ret = PTR_ERR(tb);
5107 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5108 		return ret;
5109 	}
5110 
5111 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5112 	if (!ev) {
5113 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5114 		kfree(tb);
5115 		return -EPROTO;
5116 	}
5117 
5118 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5119 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5120 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5121 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5122 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5123 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5124 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5125 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5126 
5127 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5128 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5129 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5130 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5131 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5132 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5133 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5134 	}
5135 
5136 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5137 	total_reg_rules += num_2g_reg_rules;
5138 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5139 	total_reg_rules += num_5g_reg_rules;
5140 
5141 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
5142 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
5143 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
5144 		kfree(tb);
5145 		return -EINVAL;
5146 	}
5147 
5148 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5149 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
5150 
5151 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
5152 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
5153 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
5154 			kfree(tb);
5155 			return -EINVAL;
5156 		}
5157 
5158 		total_reg_rules += num_6g_reg_rules_ap[i];
5159 	}
5160 
5161 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5162 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5163 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5164 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5165 
5166 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5167 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5168 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5169 
5170 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5171 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5172 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5173 
5174 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
5175 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
5176 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
5177 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client type %d\n",
5178 				    i);
5179 			kfree(tb);
5180 			return -EINVAL;
5181 		}
5182 	}
5183 
5184 	if (!total_reg_rules) {
5185 		ath12k_warn(ab, "No reg rules available\n");
5186 		kfree(tb);
5187 		return -EINVAL;
5188 	}
5189 
5190 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5191 
5192 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5193 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5194 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5195 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5196 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5197 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5198 
5199 	switch (le32_to_cpu(ev->status_code)) {
5200 	case WMI_REG_SET_CC_STATUS_PASS:
5201 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5202 		break;
5203 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5204 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5205 		break;
5206 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5207 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5208 		break;
5209 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5210 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5211 		break;
5212 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5213 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5214 		break;
5215 	case WMI_REG_SET_CC_STATUS_FAIL:
5216 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5217 		break;
5218 	}
5219 
5220 	reg_info->is_ext_reg_event = true;
5221 
5222 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5223 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5224 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5225 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5226 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5227 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5228 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5229 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5230 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5231 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5232 
5233 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5234 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5235 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5236 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5237 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5238 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5239 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5240 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5241 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5242 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5243 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5244 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5245 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5246 	}
5247 
5248 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5249 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5250 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5251 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5252 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5253 		   reg_info->phybitmap);
5254 
5255 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5256 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5257 		   num_2g_reg_rules, num_5g_reg_rules);
5258 
5259 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5260 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5261 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5262 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5263 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5264 
5265 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5266 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5267 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5268 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5269 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5270 
5271 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5272 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5273 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5274 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5275 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5276 
5277 	ext_wmi_reg_rule =
5278 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5279 			+ sizeof(*ev)
5280 			+ sizeof(struct wmi_tlv));
5281 
5282 	if (num_2g_reg_rules) {
5283 		reg_info->reg_rules_2g_ptr =
5284 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5285 						      ext_wmi_reg_rule);
5286 
5287 		if (!reg_info->reg_rules_2g_ptr) {
5288 			kfree(tb);
5289 			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
5290 			return -ENOMEM;
5291 		}
5292 	}
5293 
5294 	ext_wmi_reg_rule += num_2g_reg_rules;
5295 
5296 	/* For a few countries, firmware may include 6 GHz reg rules in the
5297 	 * 5 GHz rule list in addition to the separate 6 GHz rules.
5298 	 * Having the same 6 GHz rule in both the 5 GHz and 6 GHz lists
5299 	 * makes the intersect check true, so the same rule would be
5300 	 * shown multiple times in the iw output.
5301 	 * Hence, skip 6 GHz rules while parsing the 5 GHz rule list.
5302 	 */
5303 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5304 								       num_5g_reg_rules);
5305 
5306 	if (num_invalid_5ghz_ext_rules) {
5307 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5308 			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
5309 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5310 			   num_invalid_5ghz_ext_rules);
5311 
5312 		num_5g_reg_rules -= num_invalid_5ghz_ext_rules;
5313 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5314 	}
5315 
5316 	if (num_5g_reg_rules) {
5317 		reg_info->reg_rules_5g_ptr =
5318 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5319 						      ext_wmi_reg_rule);
5320 
5321 		if (!reg_info->reg_rules_5g_ptr) {
5322 			kfree(tb);
5323 			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
5324 			return -ENOMEM;
5325 		}
5326 	}
5327 
5328 	/* The 5 GHz rule count was reduced above, but the skipped duplicate
5329 	 * rules still occupy slots in ext_wmi_reg_rule and must be stepped
5330 	 * over.
5331 	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
5332 	 */
5333 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5334 
5335 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5336 		reg_info->reg_rules_6g_ap_ptr[i] =
5337 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5338 						      ext_wmi_reg_rule);
5339 
5340 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5341 			kfree(tb);
5342 			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
5343 			return -ENOMEM;
5344 		}
5345 
5346 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5347 	}
5348 
5349 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5350 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5351 			reg_info->reg_rules_6g_client_ptr[j][i] =
5352 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5353 							      ext_wmi_reg_rule);
5354 
5355 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5356 				kfree(tb);
5357 				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
5358 				return -ENOMEM;
5359 			}
5360 
5361 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5362 		}
5363 	}
5364 
5365 	reg_info->client_type = le32_to_cpu(ev->client_type);
5366 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5367 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5368 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5369 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5370 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5371 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5372 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5373 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5374 
5375 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5376 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5377 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5378 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5379 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5380 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5381 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5382 	}
5383 
5384 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5385 
5386 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5387 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5388 
5389 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5390 
5391 	kfree(tb);
5392 	return 0;
5393 }
5394 
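/*
 * Illustrative sketch of the TLV payload layout walked above through
 * ext_wmi_reg_rule (all counts come from the fixed event):
 *
 *	[fixed event][tlv header]
 *	[2g rules]
 *	[5g rules, including the skipped 6 GHz duplicates]
 *	[6g AP rules: LPI][SP][VLP]
 *	[6g client rules: LPI, SP, VLP - one block per client type]
 *
 * The duplicate 6 GHz rules are never copied, but they still occupy slots,
 * which is why the cursor advances by num_5g + num_invalid after 5 GHz.
 */
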
5395 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5396 					struct wmi_peer_delete_resp_event *peer_del_resp)
5397 {
5398 	const void **tb;
5399 	const struct wmi_peer_delete_resp_event *ev;
5400 	int ret;
5401 
5402 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5403 	if (IS_ERR(tb)) {
5404 		ret = PTR_ERR(tb);
5405 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5406 		return ret;
5407 	}
5408 
5409 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5410 	if (!ev) {
5411 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5412 		kfree(tb);
5413 		return -EPROTO;
5414 	}
5415 
5416 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5417 
5418 	peer_del_resp->vdev_id = ev->vdev_id;
5419 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5420 			ev->peer_macaddr.addr);
5421 
5422 	kfree(tb);
5423 	return 0;
5424 }
5425 
5426 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5427 					struct sk_buff *skb,
5428 					u32 *vdev_id)
5429 {
5430 	const void **tb;
5431 	const struct wmi_vdev_delete_resp_event *ev;
5432 	int ret;
5433 
5434 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5435 	if (IS_ERR(tb)) {
5436 		ret = PTR_ERR(tb);
5437 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5438 		return ret;
5439 	}
5440 
5441 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5442 	if (!ev) {
5443 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5444 		kfree(tb);
5445 		return -EPROTO;
5446 	}
5447 
5448 	*vdev_id = le32_to_cpu(ev->vdev_id);
5449 
5450 	kfree(tb);
5451 	return 0;
5452 }
5453 
5454 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5455 					struct sk_buff *skb,
5456 					u32 *vdev_id, u32 *tx_status)
5457 {
5458 	const void **tb;
5459 	const struct wmi_bcn_tx_status_event *ev;
5460 	int ret;
5461 
5462 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5463 	if (IS_ERR(tb)) {
5464 		ret = PTR_ERR(tb);
5465 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5466 		return ret;
5467 	}
5468 
5469 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5470 	if (!ev) {
5471 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
5472 		kfree(tb);
5473 		return -EPROTO;
5474 	}
5475 
5476 	*vdev_id = le32_to_cpu(ev->vdev_id);
5477 	*tx_status = le32_to_cpu(ev->tx_status);
5478 
5479 	kfree(tb);
5480 	return 0;
5481 }
5482 
5483 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5484 					      u32 *vdev_id)
5485 {
5486 	const void **tb;
5487 	const struct wmi_vdev_stopped_event *ev;
5488 	int ret;
5489 
5490 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5491 	if (IS_ERR(tb)) {
5492 		ret = PTR_ERR(tb);
5493 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5494 		return ret;
5495 	}
5496 
5497 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5498 	if (!ev) {
5499 		ath12k_warn(ab, "failed to fetch vdev stop ev");
5500 		kfree(tb);
5501 		return -EPROTO;
5502 	}
5503 
5504 	*vdev_id = le32_to_cpu(ev->vdev_id);
5505 
5506 	kfree(tb);
5507 	return 0;
5508 }
5509 
5510 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
5511 					u16 tag, u16 len,
5512 					const void *ptr, void *data)
5513 {
5514 	struct wmi_tlv_mgmt_rx_parse *parse = data;
5515 
5516 	switch (tag) {
5517 	case WMI_TAG_MGMT_RX_HDR:
5518 		parse->fixed = ptr;
5519 		break;
5520 	case WMI_TAG_ARRAY_BYTE:
5521 		if (!parse->frame_buf_done) {
5522 			parse->frame_buf = ptr;
5523 			parse->frame_buf_done = true;
5524 		}
5525 		break;
5526 	}
5527 	return 0;
5528 }
5529 
5530 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
5531 					  struct sk_buff *skb,
5532 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
5533 {
5534 	struct wmi_tlv_mgmt_rx_parse parse = { };
5535 	const struct ath12k_wmi_mgmt_rx_params *ev;
5536 	const u8 *frame;
5537 	int i, ret;
5538 
5539 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5540 				  ath12k_wmi_tlv_mgmt_rx_parse,
5541 				  &parse);
5542 	if (ret) {
5543 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
5544 		return ret;
5545 	}
5546 
5547 	ev = parse.fixed;
5548 	frame = parse.frame_buf;
5549 
5550 	if (!ev || !frame) {
5551 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
5552 		return -EPROTO;
5553 	}
5554 
5555 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
5556 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
5557 	hdr->channel = le32_to_cpu(ev->channel);
5558 	hdr->snr = le32_to_cpu(ev->snr);
5559 	hdr->rate = le32_to_cpu(ev->rate);
5560 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
5561 	hdr->buf_len = le32_to_cpu(ev->buf_len);
5562 	hdr->status = le32_to_cpu(ev->status);
5563 	hdr->flags = le32_to_cpu(ev->flags);
5564 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
5565 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
5566 
5567 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
5568 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
5569 
5570 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
5571 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
5572 		return -EPROTO;
5573 	}
5574 
5575 	/* shift the sk_buff to point to `frame` */
5576 	skb_trim(skb, 0);
5577 	skb_put(skb, frame - skb->data);
5578 	skb_pull(skb, frame - skb->data);
5579 	skb_put(skb, hdr->buf_len);
5580 
5581 	return 0;
5582 }
5583 
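/*
 * Sketch of the skb reshaping done above (illustrative): with
 * off = frame - skb->data, the sequence behaves as follows, without
 * copying a single byte:
 *
 *	skb_trim(skb, 0);		len = 0
 *	skb_put(skb, off);		tail = frame
 *	skb_pull(skb, off);		data = frame, len = 0
 *	skb_put(skb, hdr->buf_len);	len = buf_len
 *
 * leaving [frame, frame + buf_len) as the 802.11 frame mac80211 receives.
 */
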
5584 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
5585 				    u32 status)
5586 {
5587 	struct sk_buff *msdu;
5588 	struct ieee80211_tx_info *info;
5589 	struct ath12k_skb_cb *skb_cb;
5590 	int num_mgmt;
5591 
5592 	spin_lock_bh(&ar->txmgmt_idr_lock);
5593 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
5594 
5595 	if (!msdu) {
5596 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5597 			    desc_id);
5598 		spin_unlock_bh(&ar->txmgmt_idr_lock);
5599 		return -ENOENT;
5600 	}
5601 
5602 	idr_remove(&ar->txmgmt_idr, desc_id);
5603 	spin_unlock_bh(&ar->txmgmt_idr_lock);
5604 
5605 	skb_cb = ATH12K_SKB_CB(msdu);
5606 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5607 
5608 	info = IEEE80211_SKB_CB(msdu);
5609 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5610 		info->flags |= IEEE80211_TX_STAT_ACK;
5611 
5612 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
5613 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
5614 
5615 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
5616 
5617 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5618 
5619 	/* WARN if this event arrives without any mgmt tx pending */
5620 	if (num_mgmt < 0)
5621 		WARN_ON_ONCE(1);
5622 
5623 	if (!num_mgmt)
5624 		wake_up(&ar->txmgmt_empty_waitq);
5625 
5626 	return 0;
5627 }
5628 
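/*
 * Matching tx-side sketch (illustrative; the real allocation lives in the
 * mac layer and "max_pending" is a placeholder bound): the desc_id that
 * firmware echoes back in the completion is an IDR slot keyed to the msdu,
 *
 *	spin_lock_bh(&ar->txmgmt_idr_lock);
 *	desc_id = idr_alloc(&ar->txmgmt_idr, skb, 0, max_pending, GFP_ATOMIC);
 *	spin_unlock_bh(&ar->txmgmt_idr_lock);
 *
 * so the idr_find()/idr_remove() under the same lock above pairs each
 * completion with its frame.
 */
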
5629 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5630 					       struct sk_buff *skb,
5631 					       struct wmi_mgmt_tx_compl_event *param)
5632 {
5633 	const void **tb;
5634 	const struct wmi_mgmt_tx_compl_event *ev;
5635 	int ret;
5636 
5637 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5638 	if (IS_ERR(tb)) {
5639 		ret = PTR_ERR(tb);
5640 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5641 		return ret;
5642 	}
5643 
5644 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5645 	if (!ev) {
5646 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5647 		kfree(tb);
5648 		return -EPROTO;
5649 	}
5650 
5651 	param->pdev_id = ev->pdev_id;
5652 	param->desc_id = ev->desc_id;
5653 	param->status = ev->status;
5654 
5655 	kfree(tb);
5656 	return 0;
5657 }
5658 
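/*
 * Host-side scan state machine driven by the four handlers below (sketch):
 *
 *	IDLE -> STARTING		scan request sent
 *	STARTING -> RUNNING		WMI_SCAN_EVENT_STARTED
 *	STARTING -> IDLE		WMI_SCAN_EVENT_START_FAILED
 *	RUNNING -> ABORTING		cancel requested
 *	RUNNING/ABORTING -> IDLE	WMI_SCAN_EVENT_COMPLETED
 *
 * Every transition happens under ar->data_lock, hence the lockdep asserts.
 */
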
5659 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5660 {
5661 	lockdep_assert_held(&ar->data_lock);
5662 
5663 	switch (ar->scan.state) {
5664 	case ATH12K_SCAN_IDLE:
5665 	case ATH12K_SCAN_RUNNING:
5666 	case ATH12K_SCAN_ABORTING:
5667 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5668 			    ath12k_scan_state_str(ar->scan.state),
5669 			    ar->scan.state);
5670 		break;
5671 	case ATH12K_SCAN_STARTING:
5672 		ar->scan.state = ATH12K_SCAN_RUNNING;
5673 
5674 		if (ar->scan.is_roc)
5675 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5676 
5677 		complete(&ar->scan.started);
5678 		break;
5679 	}
5680 }
5681 
5682 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5683 {
5684 	lockdep_assert_held(&ar->data_lock);
5685 
5686 	switch (ar->scan.state) {
5687 	case ATH12K_SCAN_IDLE:
5688 	case ATH12K_SCAN_RUNNING:
5689 	case ATH12K_SCAN_ABORTING:
5690 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5691 			    ath12k_scan_state_str(ar->scan.state),
5692 			    ar->scan.state);
5693 		break;
5694 	case ATH12K_SCAN_STARTING:
5695 		complete(&ar->scan.started);
5696 		__ath12k_mac_scan_finish(ar);
5697 		break;
5698 	}
5699 }
5700 
5701 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5702 {
5703 	lockdep_assert_held(&ar->data_lock);
5704 
5705 	switch (ar->scan.state) {
5706 	case ATH12K_SCAN_IDLE:
5707 	case ATH12K_SCAN_STARTING:
5708 		/* One suspected reason scan can be completed while starting is
5709 		 * if firmware fails to deliver all scan events to the host,
5710 		 * e.g. when transport pipe is full. This has been observed
5711 		 * with spectral scan phyerr events starving wmi transport
5712 		 * pipe. In such case the "scan completed" event should be (and
5713 		 * is) ignored by the host as it may be just firmware's scan
5714 		 * state machine recovering.
5715 		 */
5716 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5717 			    ath12k_scan_state_str(ar->scan.state),
5718 			    ar->scan.state);
5719 		break;
5720 	case ATH12K_SCAN_RUNNING:
5721 	case ATH12K_SCAN_ABORTING:
5722 		__ath12k_mac_scan_finish(ar);
5723 		break;
5724 	}
5725 }
5726 
5727 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5728 {
5729 	lockdep_assert_held(&ar->data_lock);
5730 
5731 	switch (ar->scan.state) {
5732 	case ATH12K_SCAN_IDLE:
5733 	case ATH12K_SCAN_STARTING:
5734 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5735 			    ath12k_scan_state_str(ar->scan.state),
5736 			    ar->scan.state);
5737 		break;
5738 	case ATH12K_SCAN_RUNNING:
5739 	case ATH12K_SCAN_ABORTING:
5740 		ar->scan_channel = NULL;
5741 		break;
5742 	}
5743 }
5744 
5745 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5746 {
5747 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5748 
5749 	lockdep_assert_held(&ar->data_lock);
5750 
5751 	switch (ar->scan.state) {
5752 	case ATH12K_SCAN_IDLE:
5753 	case ATH12K_SCAN_STARTING:
5754 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5755 			    ath12k_scan_state_str(ar->scan.state),
5756 			    ar->scan.state);
5757 		break;
5758 	case ATH12K_SCAN_RUNNING:
5759 	case ATH12K_SCAN_ABORTING:
5760 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5761 
5762 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5763 			complete(&ar->scan.on_channel);
5764 
5765 		break;
5766 	}
5767 }
5768 
5769 static const char *
5770 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5771 			       enum wmi_scan_completion_reason reason)
5772 {
5773 	switch (type) {
5774 	case WMI_SCAN_EVENT_STARTED:
5775 		return "started";
5776 	case WMI_SCAN_EVENT_COMPLETED:
5777 		switch (reason) {
5778 		case WMI_SCAN_REASON_COMPLETED:
5779 			return "completed";
5780 		case WMI_SCAN_REASON_CANCELLED:
5781 			return "completed [cancelled]";
5782 		case WMI_SCAN_REASON_PREEMPTED:
5783 			return "completed [preempted]";
5784 		case WMI_SCAN_REASON_TIMEDOUT:
5785 			return "completed [timedout]";
5786 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5787 			return "completed [internal err]";
5788 		case WMI_SCAN_REASON_MAX:
5789 			break;
5790 		}
5791 		return "completed [unknown]";
5792 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5793 		return "bss channel";
5794 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5795 		return "foreign channel";
5796 	case WMI_SCAN_EVENT_DEQUEUED:
5797 		return "dequeued";
5798 	case WMI_SCAN_EVENT_PREEMPTED:
5799 		return "preempted";
5800 	case WMI_SCAN_EVENT_START_FAILED:
5801 		return "start failed";
5802 	case WMI_SCAN_EVENT_RESTARTED:
5803 		return "restarted";
5804 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5805 		return "foreign channel exit";
5806 	default:
5807 		return "unknown";
5808 	}
5809 }
5810 
5811 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5812 			       struct wmi_scan_event *scan_evt_param)
5813 {
5814 	const void **tb;
5815 	const struct wmi_scan_event *ev;
5816 	int ret;
5817 
5818 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5819 	if (IS_ERR(tb)) {
5820 		ret = PTR_ERR(tb);
5821 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5822 		return ret;
5823 	}
5824 
5825 	ev = tb[WMI_TAG_SCAN_EVENT];
5826 	if (!ev) {
5827 		ath12k_warn(ab, "failed to fetch scan ev");
5828 		kfree(tb);
5829 		return -EPROTO;
5830 	}
5831 
5832 	scan_evt_param->event_type = ev->event_type;
5833 	scan_evt_param->reason = ev->reason;
5834 	scan_evt_param->channel_freq = ev->channel_freq;
5835 	scan_evt_param->scan_req_id = ev->scan_req_id;
5836 	scan_evt_param->scan_id = ev->scan_id;
5837 	scan_evt_param->vdev_id = ev->vdev_id;
5838 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5839 
5840 	kfree(tb);
5841 	return 0;
5842 }
5843 
5844 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5845 					   struct wmi_peer_sta_kickout_arg *arg)
5846 {
5847 	const void **tb;
5848 	const struct wmi_peer_sta_kickout_event *ev;
5849 	int ret;
5850 
5851 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5852 	if (IS_ERR(tb)) {
5853 		ret = PTR_ERR(tb);
5854 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5855 		return ret;
5856 	}
5857 
5858 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5859 	if (!ev) {
5860 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5861 		kfree(tb);
5862 		return -EPROTO;
5863 	}
5864 
5865 	arg->mac_addr = ev->peer_macaddr.addr;
5866 
5867 	kfree(tb);
5868 	return 0;
5869 }
5870 
5871 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5872 			       struct wmi_roam_event *roam_ev)
5873 {
5874 	const void **tb;
5875 	const struct wmi_roam_event *ev;
5876 	int ret;
5877 
5878 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5879 	if (IS_ERR(tb)) {
5880 		ret = PTR_ERR(tb);
5881 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5882 		return ret;
5883 	}
5884 
5885 	ev = tb[WMI_TAG_ROAM_EVENT];
5886 	if (!ev) {
5887 		ath12k_warn(ab, "failed to fetch roam ev");
5888 		kfree(tb);
5889 		return -EPROTO;
5890 	}
5891 
5892 	roam_ev->vdev_id = ev->vdev_id;
5893 	roam_ev->reason = ev->reason;
5894 	roam_ev->rssi = ev->rssi;
5895 
5896 	kfree(tb);
5897 	return 0;
5898 }
5899 
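/* freq_to_idx() flattens all channels advertised to mac80211 (2 GHz band
 * first, then 5/6 GHz) into the single index space used for per-channel
 * reporting; an unmatched frequency falls through and yields the total
 * channel count, i.e. one past the last valid index.
 */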
5900 static int freq_to_idx(struct ath12k *ar, int freq)
5901 {
5902 	struct ieee80211_supported_band *sband;
5903 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5904 	int band, ch, idx = 0;
5905 
5906 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5907 		if (!ar->mac.sbands[band].channels)
5908 			continue;
5909 
5910 		sband = hw->wiphy->bands[band];
5911 		if (!sband)
5912 			continue;
5913 
5914 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5915 			if (sband->channels[ch].center_freq == freq)
5916 				goto exit;
5917 	}
5918 
5919 exit:
5920 	return idx;
5921 }
5922 
5923 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5924 				    struct wmi_chan_info_event *ch_info_ev)
5925 {
5926 	const void **tb;
5927 	const struct wmi_chan_info_event *ev;
5928 	int ret;
5929 
5930 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5931 	if (IS_ERR(tb)) {
5932 		ret = PTR_ERR(tb);
5933 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5934 		return ret;
5935 	}
5936 
5937 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5938 	if (!ev) {
5939 		ath12k_warn(ab, "failed to fetch chan info ev");
5940 		kfree(tb);
5941 		return -EPROTO;
5942 	}
5943 
5944 	ch_info_ev->err_code = ev->err_code;
5945 	ch_info_ev->freq = ev->freq;
5946 	ch_info_ev->cmd_flags = ev->cmd_flags;
5947 	ch_info_ev->noise_floor = ev->noise_floor;
5948 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5949 	ch_info_ev->cycle_count = ev->cycle_count;
5950 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5951 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5952 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5953 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5954 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5955 	ch_info_ev->vdev_id = ev->vdev_id;
5956 
5957 	kfree(tb);
5958 	return 0;
5959 }
5960 
5961 static int
5962 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5963 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5964 {
5965 	const void **tb;
5966 	const struct wmi_pdev_bss_chan_info_event *ev;
5967 	int ret;
5968 
5969 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5970 	if (IS_ERR(tb)) {
5971 		ret = PTR_ERR(tb);
5972 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5973 		return ret;
5974 	}
5975 
5976 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5977 	if (!ev) {
5978 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5979 		kfree(tb);
5980 		return -EPROTO;
5981 	}
5982 
5983 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5984 	bss_ch_info_ev->freq = ev->freq;
5985 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5986 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5987 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5988 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5989 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5990 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5991 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5992 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5993 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5994 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5995 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5996 
5997 	kfree(tb);
5998 	return 0;
5999 }
6000 
6001 static int
6002 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
6003 				      struct wmi_vdev_install_key_complete_arg *arg)
6004 {
6005 	const void **tb;
6006 	const struct wmi_vdev_install_key_compl_event *ev;
6007 	int ret;
6008 
6009 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6010 	if (IS_ERR(tb)) {
6011 		ret = PTR_ERR(tb);
6012 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6013 		return ret;
6014 	}
6015 
6016 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
6017 	if (!ev) {
6018 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
6019 		kfree(tb);
6020 		return -EPROTO;
6021 	}
6022 
6023 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6024 	arg->macaddr = ev->peer_macaddr.addr;
6025 	arg->key_idx = le32_to_cpu(ev->key_idx);
6026 	arg->key_flags = le32_to_cpu(ev->key_flags);
6027 	arg->status = le32_to_cpu(ev->status);
6028 
6029 	kfree(tb);
6030 	return 0;
6031 }
6032 
6033 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6034 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6035 {
6036 	const void **tb;
6037 	const struct wmi_peer_assoc_conf_event *ev;
6038 	int ret;
6039 
6040 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6041 	if (IS_ERR(tb)) {
6042 		ret = PTR_ERR(tb);
6043 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6044 		return ret;
6045 	}
6046 
6047 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6048 	if (!ev) {
6049 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6050 		kfree(tb);
6051 		return -EPROTO;
6052 	}
6053 
6054 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6055 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6056 
6057 	kfree(tb);
6058 	return 0;
6059 }
6060 
6061 static int
6062 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6063 			 const struct wmi_pdev_temperature_event *ev)
6064 {
6065 	const void **tb;
6066 	int ret;
6067 
6068 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6069 	if (IS_ERR(tb)) {
6070 		ret = PTR_ERR(tb);
6071 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6072 		return ret;
6073 	}
6074 
6075 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6076 	if (!ev) {
6077 		ath12k_warn(ab, "failed to fetch pdev temp ev");
6078 		kfree(tb);
6079 		return -EPROTO;
6080 	}
6081 
6082 	kfree(tb);
6083 	return 0;
6084 }
6085 
6086 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6087 {
6088 	/* try to send pending beacons first. they take priority */
6089 	wake_up(&ab->wmi_ab.tx_credits_wq);
6090 }
6091 
6092 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6093 {
6094 	const struct wmi_11d_new_cc_event *ev;
6095 	struct ath12k *ar;
6096 	struct ath12k_pdev *pdev;
6097 	const void **tb;
6098 	int ret, i;
6099 
6100 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6101 	if (IS_ERR(tb)) {
6102 		ret = PTR_ERR(tb);
6103 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6104 		return ret;
6105 	}
6106 
6107 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6108 	if (!ev) {
6109 		kfree(tb);
6110 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6111 		return -EPROTO;
6112 	}
6113 
6114 	spin_lock_bh(&ab->base_lock);
6115 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6116 	spin_unlock_bh(&ab->base_lock);
6117 
6118 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6119 		   ab->new_alpha2[0],
6120 		   ab->new_alpha2[1]);
6121 
6122 	kfree(tb);
6123 
6124 	for (i = 0; i < ab->num_radios; i++) {
6125 		pdev = &ab->pdevs[i];
6126 		ar = pdev->ar;
6127 		ar->state_11d = ATH12K_11D_IDLE;
6128 		ar->ah->regd_updated = false;
6129 		complete(&ar->completed_11d_scan);
6130 	}
6131 
6132 	queue_work(ab->workqueue, &ab->update_11d_work);
6133 
6134 	return 0;
6135 }
6136 
6137 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6138 				       struct sk_buff *skb)
6139 {
6140 	dev_kfree_skb(skb);
6141 }
6142 
6143 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6144 {
6145 	struct ath12k_reg_info *reg_info;
6146 	u8 pdev_idx;
6147 	int ret;
6148 
6149 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6150 	if (!reg_info) {
6151 		ret = -ENOMEM;
6152 		goto fallback;
6153 	}
6154 
6155 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6156 	if (ret) {
6157 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6158 		goto mem_free;
6159 	}
6160 
6161 	ret = ath12k_reg_validate_reg_info(ab, reg_info);
6162 	if (ret == ATH12K_REG_STATUS_FALLBACK) {
6163 		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
6164 		/* Firmware has switched to the new regd but the host cannot
6165 		 * continue, so free reg_info and fall back to the old regd.
6166 		 */
6167 		goto mem_free;
6168 	} else if (ret == ATH12K_REG_STATUS_DROP) {
6169 		/* reg_info is valid but we neither store it nor create
6170 		 * a new regd from it
6171 		 */
6172 		ret = ATH12K_REG_STATUS_VALID;
6173 		goto mem_free;
6174 	}
6175 
6176 	/* free the old reg_info if it exists */
6177 	pdev_idx = reg_info->phy_id;
6178 	if (ab->reg_info[pdev_idx]) {
6179 		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
6180 		kfree(ab->reg_info[pdev_idx]);
6181 	}
6182 	/* reg_info is valid; store it for later use even if
6183 	 * the regd build below fails
6184 	 */
6185 	ab->reg_info[pdev_idx] = reg_info;
6186 
6187 	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
6188 					  IEEE80211_REG_UNSET_AP);
6189 	if (ret) {
6190 		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
6191 		goto fallback;
6192 	}
6193 
6194 	goto out;
6195 
6196 mem_free:
6197 	ath12k_reg_reset_reg_info(reg_info);
6198 	kfree(reg_info);
6199 
6200 	if (ret == ATH12K_REG_STATUS_VALID)
6201 		return ret;
6202 
6203 fallback:
6204 	/* Fall back to the older regd by sending the previous country
6205 	 * setting again, since fw succeeded while we failed to process here.
6206 	 * The regdomain should be uniform across driver and fw. Since the
6207 	 * fw has processed the command and sent a success status, we expect
6208 	 * this function to succeed as well. If it doesn't, CTRY needs to be
6209 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6210 	 */
6211 	/* TODO: this is rare, but it should still be handled */
6212 	WARN_ON(1);
6213 
6214 out:
6215 	return ret;
6216 }
6217 
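/*
 * Outcome summary for the handler above (illustrative):
 *
 *	ATH12K_REG_STATUS_VALID		reg_info stored, regd rebuilt
 *	ATH12K_REG_STATUS_DROP		reg_info valid but intentionally unused
 *	ATH12K_REG_STATUS_FALLBACK	host keeps the old regd (WARNs for now)
 */
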
6218 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6219 				const void *ptr, void *data)
6220 {
6221 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6222 	struct wmi_ready_event fixed_param;
6223 	struct ath12k_wmi_mac_addr_params *addr_list;
6224 	struct ath12k_pdev *pdev;
6225 	u32 num_mac_addr;
6226 	int i;
6227 
6228 	switch (tag) {
6229 	case WMI_TAG_READY_EVENT:
6230 		memset(&fixed_param, 0, sizeof(fixed_param));
6231 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6232 		       min_t(u16, sizeof(fixed_param), len));
6233 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6234 		rdy_parse->num_extra_mac_addr =
6235 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6236 
6237 		ether_addr_copy(ab->mac_addr,
6238 				fixed_param.ready_event_min.mac_addr.addr);
6239 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6240 		ab->wmi_ready = true;
6241 		break;
6242 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6243 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6244 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6245 
6246 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6247 			break;
6248 
6249 		for (i = 0; i < ab->num_radios; i++) {
6250 			pdev = &ab->pdevs[i];
6251 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6252 		}
6253 		ab->pdevs_macaddr_valid = true;
6254 		break;
6255 	default:
6256 		break;
6257 	}
6258 
6259 	return 0;
6260 }
6261 
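/*
 * Note on the WMI_TAG_READY_EVENT copy above: older firmware may send a
 * shorter fixed_param than the host struct, so the memcpy is bounded by
 * min_t(u16, sizeof(fixed_param), len) while the preceding memset keeps any
 * missing tail zeroed - a defensive pattern for variable-size fixed params.
 */
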
6262 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6263 {
6264 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6265 	int ret;
6266 
6267 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6268 				  ath12k_wmi_rdy_parse, &rdy_parse);
6269 	if (ret) {
6270 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6271 		return ret;
6272 	}
6273 
6274 	complete(&ab->wmi_ab.unified_ready);
6275 	return 0;
6276 }
6277 
6278 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6279 {
6280 	struct wmi_peer_delete_resp_event peer_del_resp;
6281 	struct ath12k *ar;
6282 
6283 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6284 		ath12k_warn(ab, "failed to extract peer delete resp");
6285 		return;
6286 	}
6287 
6288 	rcu_read_lock();
6289 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6290 	if (!ar) {
6291 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6292 			    le32_to_cpu(peer_del_resp.vdev_id));
6293 		rcu_read_unlock();
6294 		return;
6295 	}
6296 
6297 	complete(&ar->peer_delete_done);
6298 	rcu_read_unlock();
6299 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6300 		   le32_to_cpu(peer_del_resp.vdev_id), peer_del_resp.peer_macaddr.addr);
6301 }
6302 
6303 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6304 					  struct sk_buff *skb)
6305 {
6306 	struct ath12k *ar;
6307 	u32 vdev_id = 0;
6308 
6309 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6310 		ath12k_warn(ab, "failed to extract vdev delete resp");
6311 		return;
6312 	}
6313 
6314 	rcu_read_lock();
6315 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6316 	if (!ar) {
6317 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6318 			    vdev_id);
6319 		rcu_read_unlock();
6320 		return;
6321 	}
6322 
6323 	complete(&ar->vdev_delete_done);
6324 
6325 	rcu_read_unlock();
6326 
6327 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6328 		   vdev_id);
6329 }
6330 
6331 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6332 {
6333 	switch (vdev_resp_status) {
6334 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6335 		return "invalid vdev id";
6336 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6337 		return "not supported";
6338 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6339 		return "dfs violation";
6340 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6341 		return "invalid regdomain";
6342 	default:
6343 		return "unknown";
6344 	}
6345 }
6346 
6347 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6348 {
6349 	struct wmi_vdev_start_resp_event vdev_start_resp;
6350 	struct ath12k *ar;
6351 	u32 status;
6352 
6353 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6354 		ath12k_warn(ab, "failed to extract vdev start resp");
6355 		return;
6356 	}
6357 
6358 	rcu_read_lock();
6359 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6360 	if (!ar) {
6361 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6362 			    le32_to_cpu(vdev_start_resp.vdev_id));
6363 		rcu_read_unlock();
6364 		return;
6365 	}
6366 
6367 	ar->last_wmi_vdev_start_status = 0;
6368 
6369 	status = le32_to_cpu(vdev_start_resp.status);
6370 	if (WARN_ON_ONCE(status)) {
6371 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6372 			    status, ath12k_wmi_vdev_resp_print(status));
6373 		ar->last_wmi_vdev_start_status = status;
6374 	}
6375 
6376 	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
6377 
6378 	complete(&ar->vdev_setup_done);
6379 
6380 	rcu_read_unlock();
6381 
6382 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6383 		   le32_to_cpu(vdev_start_resp.vdev_id));
6384 }
6385 
6386 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6387 {
6388 	u32 vdev_id, tx_status;
6389 
6390 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6391 		ath12k_warn(ab, "failed to extract bcn tx status");
6392 		return;
6393 	}
6394 }
6395 
6396 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6397 {
6398 	struct ath12k *ar;
6399 	u32 vdev_id = 0;
6400 
6401 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6402 		ath12k_warn(ab, "failed to extract vdev stopped event");
6403 		return;
6404 	}
6405 
6406 	rcu_read_lock();
6407 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6408 	if (!ar) {
6409 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6410 			    vdev_id);
6411 		rcu_read_unlock();
6412 		return;
6413 	}
6414 
6415 	complete(&ar->vdev_setup_done);
6416 
6417 	rcu_read_unlock();
6418 
6419 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6420 }
6421 
6422 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6423 {
6424 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6425 	struct ath12k *ar;
6426 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6427 	struct ieee80211_hdr *hdr;
6428 	u16 fc;
6429 	struct ieee80211_supported_band *sband;
6430 
6431 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6432 		ath12k_warn(ab, "failed to extract mgmt rx event");
6433 		dev_kfree_skb(skb);
6434 		return;
6435 	}
6436 
6437 	memset(status, 0, sizeof(*status));
6438 
6439 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
6440 		   rx_ev.status);
6441 
6442 	rcu_read_lock();
6443 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6444 
6445 	if (!ar) {
6446 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6447 			    rx_ev.pdev_id);
6448 		dev_kfree_skb(skb);
6449 		goto exit;
6450 	}
6451 
6452 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
6453 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6454 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
6455 			     WMI_RX_STATUS_ERR_CRC))) {
6456 		dev_kfree_skb(skb);
6457 		goto exit;
6458 	}
6459 
6460 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6461 		status->flag |= RX_FLAG_MMIC_ERROR;
6462 
6463 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
6464 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
6465 		status->band = NL80211_BAND_6GHZ;
6466 		status->freq = rx_ev.chan_freq;
6467 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6468 		status->band = NL80211_BAND_2GHZ;
6469 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
6470 		status->band = NL80211_BAND_5GHZ;
6471 	} else {
6472 		/* Shouldn't happen unless the list of channels advertised
6473 		 * to mac80211 has been changed.
6474 		 */
6475 		WARN_ON_ONCE(1);
6476 		dev_kfree_skb(skb);
6477 		goto exit;
6478 	}
6479 
6480 	if (rx_ev.phy_mode == MODE_11B &&
6481 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6482 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6483 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6484 
6485 	sband = &ar->mac.sbands[status->band];
6486 
6487 	if (status->band != NL80211_BAND_6GHZ)
6488 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6489 							      status->band);
6490 
6491 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
6492 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6493 
6494 	hdr = (struct ieee80211_hdr *)skb->data;
6495 	fc = le16_to_cpu(hdr->frame_control);
6496 
6497 	/* Firmware is guaranteed to report all essential management frames via
6498 	 * WMI while it can deliver some extra via HTT. Since there can be
6499 	 * duplicates, split the reporting wrt monitor/sniffing.
6500 	 */
6501 	status->flag |= RX_FLAG_SKIP_MONITOR;
6502 
6503 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
6504 	 * including group privacy action frames.
6505 	 */
6506 	if (ieee80211_has_protected(hdr->frame_control)) {
6507 		status->flag |= RX_FLAG_DECRYPTED;
6508 
6509 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
6510 			status->flag |= RX_FLAG_IV_STRIPPED |
6511 					RX_FLAG_MMIC_STRIPPED;
6512 			hdr->frame_control = __cpu_to_le16(fc &
6513 					     ~IEEE80211_FCTL_PROTECTED);
6514 		}
6515 	}
6516 
6517 	if (ieee80211_is_beacon(hdr->frame_control))
6518 		ath12k_mac_handle_beacon(ar, skb);
6519 
6520 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6521 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
6522 		   skb, skb->len,
6523 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6524 
6525 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6526 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6527 		   status->freq, status->band, status->signal,
6528 		   status->rate_idx);
6529 
6530 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
6531 
6532 exit:
6533 	rcu_read_unlock();
6534 }
6535 
6536 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6537 {
6538 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6539 	struct ath12k *ar;
6540 
6541 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6542 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
6543 		return;
6544 	}
6545 
6546 	rcu_read_lock();
6547 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6548 	if (!ar) {
6549 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6550 			    tx_compl_param.pdev_id);
6551 		goto exit;
6552 	}
6553 
6554 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6555 				 le32_to_cpu(tx_compl_param.status));
6556 
6557 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6558 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6559 		   le32_to_cpu(tx_compl_param.pdev_id),
6560 		   le32_to_cpu(tx_compl_param.desc_id), le32_to_cpu(tx_compl_param.status));
6561 
6562 exit:
6563 	rcu_read_unlock();
6564 }
6565 
6566 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
6567 						  u32 vdev_id,
6568 						  enum ath12k_scan_state state)
6569 {
6570 	int i;
6571 	struct ath12k_pdev *pdev;
6572 	struct ath12k *ar;
6573 
6574 	for (i = 0; i < ab->num_radios; i++) {
6575 		pdev = rcu_dereference(ab->pdevs_active[i]);
6576 		if (pdev && pdev->ar) {
6577 			ar = pdev->ar;
6578 
6579 			spin_lock_bh(&ar->data_lock);
6580 			if (ar->scan.state == state &&
6581 			    ar->scan.arvif &&
6582 			    ar->scan.arvif->vdev_id == vdev_id) {
6583 				spin_unlock_bh(&ar->data_lock);
6584 				return ar;
6585 			}
6586 			spin_unlock_bh(&ar->data_lock);
6587 		}
6588 	}
6589 	return NULL;
6590 }
6591 
6592 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
6593 {
6594 	struct ath12k *ar;
6595 	struct wmi_scan_event scan_ev = {0};
6596 
6597 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6598 		ath12k_warn(ab, "failed to extract scan event");
6599 		return;
6600 	}
6601 
6602 	rcu_read_lock();
6603 
6604 	/* In case the scan was cancelled, e.g. during interface teardown,
6605 	 * the interface will not be found among the active interfaces.
6606 	 * In such scenarios, instead iterate over the active pdevs and
6607 	 * look for the 'ar' whose ABORTING scan's vdev id matches the
6608 	 * vdev id in this event.
6609 	 */
6610 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
6611 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
6612 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6613 						 ATH12K_SCAN_ABORTING);
6614 		if (!ar)
6615 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6616 							 ATH12K_SCAN_RUNNING);
6617 	} else {
6618 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
6619 	}
6620 
6621 	if (!ar) {
6622 		ath12k_warn(ab, "Received scan event for unknown vdev");
6623 		rcu_read_unlock();
6624 		return;
6625 	}
6626 
6627 	spin_lock_bh(&ar->data_lock);
6628 
6629 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6630 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
6631 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
6632 						  le32_to_cpu(scan_ev.reason)),
6633 		   le32_to_cpu(scan_ev.event_type),
6634 		   le32_to_cpu(scan_ev.reason),
6635 		   le32_to_cpu(scan_ev.channel_freq),
6636 		   le32_to_cpu(scan_ev.scan_req_id),
6637 		   le32_to_cpu(scan_ev.scan_id),
6638 		   le32_to_cpu(scan_ev.vdev_id),
6639 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
6640 
6641 	switch (le32_to_cpu(scan_ev.event_type)) {
6642 	case WMI_SCAN_EVENT_STARTED:
6643 		ath12k_wmi_event_scan_started(ar);
6644 		break;
6645 	case WMI_SCAN_EVENT_COMPLETED:
6646 		ath12k_wmi_event_scan_completed(ar);
6647 		break;
6648 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6649 		ath12k_wmi_event_scan_bss_chan(ar);
6650 		break;
6651 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6652 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
6653 		break;
6654 	case WMI_SCAN_EVENT_START_FAILED:
6655 		ath12k_warn(ab, "received scan start failure event\n");
6656 		ath12k_wmi_event_scan_start_failed(ar);
6657 		break;
6658 	case WMI_SCAN_EVENT_DEQUEUED:
6659 		__ath12k_mac_scan_finish(ar);
6660 		break;
6661 	case WMI_SCAN_EVENT_PREEMPTED:
6662 	case WMI_SCAN_EVENT_RESTARTED:
6663 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6664 	default:
6665 		break;
6666 	}
6667 
6668 	spin_unlock_bh(&ar->data_lock);
6669 
6670 	rcu_read_unlock();
6671 }
6672 
6673 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6674 {
6675 	struct wmi_peer_sta_kickout_arg arg = {};
6676 	struct ieee80211_sta *sta;
6677 	struct ath12k_peer *peer;
6678 	struct ath12k *ar;
6679 
6680 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6681 		ath12k_warn(ab, "failed to extract peer sta kickout event");
6682 		return;
6683 	}
6684 
6685 	rcu_read_lock();
6686 
6687 	spin_lock_bh(&ab->base_lock);
6688 
6689 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6690 
6691 	if (!peer) {
6692 		ath12k_warn(ab, "peer not found %pM\n",
6693 			    arg.mac_addr);
6694 		goto exit;
6695 	}
6696 
6697 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6698 	if (!ar) {
6699 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6700 			    peer->vdev_id);
6701 		goto exit;
6702 	}
6703 
6704 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6705 					   arg.mac_addr, NULL);
6706 	if (!sta) {
6707 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6708 			    arg.mac_addr);
6709 		goto exit;
6710 	}
6711 
6712 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6713 		   arg.mac_addr);
6714 
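	/* Report a nominal count of 10 un-ACKed packets so that mac80211
	 * raises a CQM packet-loss notification for the kicked-out station.
	 */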
6715 	ieee80211_report_low_ack(sta, 10);
6716 
6717 exit:
6718 	spin_unlock_bh(&ab->base_lock);
6719 	rcu_read_unlock();
6720 }
6721 
6722 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6723 {
6724 	struct wmi_roam_event roam_ev = {};
6725 	struct ath12k *ar;
6726 	u32 vdev_id;
6727 	u8 roam_reason;
6728 
6729 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6730 		ath12k_warn(ab, "failed to extract roam event");
6731 		return;
6732 	}
6733 
6734 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
6735 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6736 				   WMI_ROAM_REASON_MASK);
6737 
6738 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6739 		   "wmi roam event vdev %u reason %d rssi %d\n",
6740 		   vdev_id, roam_reason, roam_ev.rssi);
6741 
6742 	rcu_read_lock();
6743 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6744 	if (!ar) {
6745 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6746 		rcu_read_unlock();
6747 		return;
6748 	}
6749 
6750 	if (roam_reason >= WMI_ROAM_REASON_MAX)
6751 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6752 			    roam_reason, vdev_id);
6753 
6754 	switch (roam_reason) {
6755 	case WMI_ROAM_REASON_BEACON_MISS:
6756 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
6757 		break;
6758 	case WMI_ROAM_REASON_BETTER_AP:
6759 	case WMI_ROAM_REASON_LOW_RSSI:
6760 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6761 	case WMI_ROAM_REASON_HO_FAILED:
6762 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6763 			    roam_reason, vdev_id);
6764 		break;
6765 	}
6766 
6767 	rcu_read_unlock();
6768 }
6769 
6770 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6771 {
6772 	struct wmi_chan_info_event ch_info_ev = {0};
6773 	struct ath12k *ar;
6774 	struct survey_info *survey;
6775 	int idx;
6776 	/* HW channel counters frequency value in hertz */
6777 	u32 cc_freq_hz = ab->cc_freq_hz;
6778 
6779 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6780 		ath12k_warn(ab, "failed to extract chan info event");
6781 		return;
6782 	}
6783 
6784 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6785 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6786 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6787 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6788 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6789 		   ch_info_ev.mac_clk_mhz);
6790 
6791 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6792 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6793 		return;
6794 	}
6795 
6796 	rcu_read_lock();
6797 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6798 	if (!ar) {
6799 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6800 			    ch_info_ev.vdev_id);
6801 		rcu_read_unlock();
6802 		return;
6803 	}
6804 	spin_lock_bh(&ar->data_lock);
6805 
6806 	switch (ar->scan.state) {
6807 	case ATH12K_SCAN_IDLE:
6808 	case ATH12K_SCAN_STARTING:
6809 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6810 		goto exit;
6811 	case ATH12K_SCAN_RUNNING:
6812 	case ATH12K_SCAN_ABORTING:
6813 		break;
6814 	}
6815 
6816 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6817 	if (idx >= ARRAY_SIZE(ar->survey)) {
6818 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6819 			    ch_info_ev.freq, idx);
6820 		goto exit;
6821 	}
6822 
6823 	/* If the FW provides the MAC clock frequency in MHz, override the HW
6824 	 * counters frequency; * 1000 gives counts/ms, i.e. survey times in ms.
6825 	 */
6826 	if (ch_info_ev.mac_clk_mhz)
6827 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6828 
6829 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
6830 		survey = &ar->survey[idx];
6831 		memset(survey, 0, sizeof(*survey));
6832 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6833 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6834 				 SURVEY_INFO_TIME_BUSY;
6835 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6836 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6837 					    cc_freq_hz);
6838 	}
6839 exit:
6840 	spin_unlock_bh(&ar->data_lock);
6841 	rcu_read_unlock();
6842 }
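
/* A minimal worked example of the counter arithmetic above, assuming a
 * hypothetical 160 MHz MAC clock: cc_freq_hz becomes 160 * 1000 counts
 * per millisecond, so a cycle_count of 16,000,000 maps to
 * survey->time = 16000000 / 160000 = 100 ms.
 */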
6843 
6844 static void
6845 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6846 {
6847 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6848 	struct survey_info *survey;
6849 	struct ath12k *ar;
6850 	u32 cc_freq_hz = ab->cc_freq_hz;
6851 	u64 busy, total, tx, rx, rx_bss;
6852 	int idx;
6853 
6854 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6855 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6856 		return;
6857 	}
6858 
6859 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6860 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6861 
6862 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6863 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6864 
6865 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6866 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6867 
6868 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6869 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6870 
6871 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6872 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6873 
6874 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6875 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6876 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6877 		   bss_ch_info_ev.noise_floor, busy, total,
6878 		   tx, rx, rx_bss);
6879 
6880 	rcu_read_lock();
6881 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6882 
6883 	if (!ar) {
6884 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6885 			    bss_ch_info_ev.pdev_id);
6886 		rcu_read_unlock();
6887 		return;
6888 	}
6889 
6890 	spin_lock_bh(&ar->data_lock);
6891 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6892 	if (idx >= ARRAY_SIZE(ar->survey)) {
6893 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6894 			    bss_ch_info_ev.freq, idx);
6895 		goto exit;
6896 	}
6897 
6898 	survey = &ar->survey[idx];
6899 
6900 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6901 	survey->time      = div_u64(total, cc_freq_hz);
6902 	survey->time_busy = div_u64(busy, cc_freq_hz);
6903 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6904 	survey->time_tx   = div_u64(tx, cc_freq_hz);
6905 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6906 			     SURVEY_INFO_TIME |
6907 			     SURVEY_INFO_TIME_BUSY |
6908 			     SURVEY_INFO_TIME_RX |
6909 			     SURVEY_INFO_TIME_TX);
6910 exit:
6911 	spin_unlock_bh(&ar->data_lock);
6912 	complete(&ar->bss_survey_done);
6913 
6914 	rcu_read_unlock();
6915 }
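
/* A minimal sketch (hypothetical helper) of the 64-bit counter
 * reassembly used above: the firmware reports each cycle counter as two
 * little-endian 32-bit halves, which are recombined as (high << 32) | low
 * before the busy/tx/rx times are derived from them.
 */
static u64 ath12k_sketch_combine_counter(__le32 high, __le32 low)
{
	return (u64)le32_to_cpu(high) << 32 | le32_to_cpu(low);
}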
6916 
6917 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6918 						struct sk_buff *skb)
6919 {
6920 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6921 	struct ath12k *ar;
6922 
6923 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6924 		ath12k_warn(ab, "failed to extract install key compl event");
6925 		return;
6926 	}
6927 
6928 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6929 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6930 		   install_key_compl.key_idx, install_key_compl.key_flags,
6931 		   install_key_compl.macaddr, install_key_compl.status);
6932 
6933 	rcu_read_lock();
6934 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6935 	if (!ar) {
6936 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6937 			    install_key_compl.vdev_id);
6938 		rcu_read_unlock();
6939 		return;
6940 	}
6941 
6942 	ar->install_key_status = 0;
6943 
6944 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6945 		ath12k_warn(ab, "install key failed for %pM status %d\n",
6946 			    install_key_compl.macaddr, install_key_compl.status);
6947 		ar->install_key_status = install_key_compl.status;
6948 	}
6949 
6950 	complete(&ar->install_key_done);
6951 	rcu_read_unlock();
6952 }
6953 
6954 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6955 					  u16 tag, u16 len,
6956 					  const void *ptr,
6957 					  void *data)
6958 {
6959 	const struct wmi_service_available_event *ev;
6960 	u32 *wmi_ext2_service_bitmap;
6961 	int i, j;
6962 	u16 expected_len;
6963 
6964 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6965 	if (len < expected_len) {
6966 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
6967 			    len, tag);
6968 		return -EINVAL;
6969 	}
6970 
6971 	switch (tag) {
6972 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
6973 		ev = (struct wmi_service_available_event *)ptr;
6974 		for (i = 0, j = WMI_MAX_SERVICE;
6975 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6976 		     i++) {
6977 			do {
6978 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6979 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6980 					set_bit(j, ab->wmi_ab.svc_map);
6981 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6982 		}
6983 
6984 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6985 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
6986 			   ev->wmi_service_segment_bitmap[0],
6987 			   ev->wmi_service_segment_bitmap[1],
6988 			   ev->wmi_service_segment_bitmap[2],
6989 			   ev->wmi_service_segment_bitmap[3]);
6990 		break;
6991 	case WMI_TAG_ARRAY_UINT32:
6992 		wmi_ext2_service_bitmap = (u32 *)ptr;
6993 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
6994 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
6995 		     i++) {
6996 			do {
6997 				if (wmi_ext2_service_bitmap[i] &
6998 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6999 					set_bit(j, ab->wmi_ab.svc_map);
7000 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7001 		}
7002 
7003 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7004 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
7005 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7006 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7007 		break;
7008 	}
7009 	return 0;
7010 }
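
/* A minimal sketch (hypothetical stand-alone form) of the service-bitmap
 * walk above: service id j maps to bit (j % 32) of 32-bit word
 * (j - base) / 32, where base is WMI_MAX_SERVICE for the first segment
 * and WMI_MAX_EXT_SERVICE for the ext2 segment; like the loop above,
 * this assumes both bases are multiples of 32.
 */
static void ath12k_sketch_set_svc_bits(const u32 *bitmap, unsigned long *svc_map,
				       u32 base, u32 limit)
{
	u32 j;

	for (j = base; j < limit; j++)
		if (bitmap[(j - base) / 32] & BIT(j % 32))
			set_bit(j, svc_map);
}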
7011 
7012 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7013 {
7014 	int ret;
7015 
7016 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7017 				  ath12k_wmi_tlv_services_parser,
7018 				  NULL);
7019 	return ret;
7020 }
7021 
7022 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7023 {
7024 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7025 	struct ath12k *ar;
7026 
7027 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7028 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7029 		return;
7030 	}
7031 
7032 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7033 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
7034 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7035 
7036 	rcu_read_lock();
7037 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7038 
7039 	if (!ar) {
7040 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7041 			    peer_assoc_conf.vdev_id);
7042 		rcu_read_unlock();
7043 		return;
7044 	}
7045 
7046 	complete(&ar->peer_assoc_done);
7047 	rcu_read_unlock();
7048 }
7049 
7050 static void
7051 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
7052 			      struct ath12k_fw_stats *fw_stats,
7053 			      char *buf, u32 *length)
7054 {
7055 	const struct ath12k_fw_stats_vdev *vdev;
7056 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7057 	struct ath12k_link_vif *arvif;
7058 	u32 len = *length;
7059 	u8 *vif_macaddr;
7060 	int i;
7061 
7062 	len += scnprintf(buf + len, buf_len - len, "\n");
7063 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7064 			 "ath12k VDEV stats");
7065 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7066 			 "=================");
7067 
7068 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
7069 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
7070 		if (!arvif)
7071 			continue;
7072 		vif_macaddr = arvif->ahvif->vif->addr;
7073 
7074 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7075 				 "VDEV ID", vdev->vdev_id);
7076 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7077 				 "VDEV MAC address", vif_macaddr);
7078 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7079 				 "beacon snr", vdev->beacon_snr);
7080 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7081 				 "data snr", vdev->data_snr);
7082 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7083 				 "num rx frames", vdev->num_rx_frames);
7084 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7085 				 "num rts fail", vdev->num_rts_fail);
7086 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7087 				 "num rts success", vdev->num_rts_success);
7088 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7089 				 "num rx err", vdev->num_rx_err);
7090 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7091 				 "num rx discard", vdev->num_rx_discard);
7092 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7093 				 "num tx not acked", vdev->num_tx_not_acked);
7094 
7095 		for (i = 0; i < WLAN_MAX_AC; i++)
7096 			len += scnprintf(buf + len, buf_len - len,
7097 					"%25s [%02d] %u\n",
7098 					"num tx frames", i,
7099 					vdev->num_tx_frames[i]);
7100 
7101 		for (i = 0; i < WLAN_MAX_AC; i++)
7102 			len += scnprintf(buf + len, buf_len - len,
7103 					"%25s [%02d] %u\n",
7104 					"num tx frames retries", i,
7105 					vdev->num_tx_frames_retries[i]);
7106 
7107 		for (i = 0; i < WLAN_MAX_AC; i++)
7108 			len += scnprintf(buf + len, buf_len - len,
7109 					"%25s [%02d] %u\n",
7110 					"num tx frames failures", i,
7111 					vdev->num_tx_frames_failures[i]);
7112 
7113 		for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7114 			len += scnprintf(buf + len, buf_len - len,
7115 					"%25s [%02d] 0x%08x\n",
7116 					"tx rate history", i,
7117 					vdev->tx_rate_history[i]);
7118 		for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7119 			len += scnprintf(buf + len, buf_len - len,
7120 					"%25s [%02d] %u\n",
7121 					"beacon rssi history", i,
7122 					vdev->beacon_rssi_history[i]);
7123 
7124 		len += scnprintf(buf + len, buf_len - len, "\n");
7125 		*length = len;
7126 	}
7127 }
7128 
7129 static void
7130 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7131 			     struct ath12k_fw_stats *fw_stats,
7132 			     char *buf, u32 *length)
7133 {
7134 	const struct ath12k_fw_stats_bcn *bcn;
7135 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7136 	struct ath12k_link_vif *arvif;
7137 	u32 len = *length;
7138 	size_t num_bcn;
7139 
7140 	num_bcn = list_count_nodes(&fw_stats->bcn);
7141 
7142 	len += scnprintf(buf + len, buf_len - len, "\n");
7143 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7144 			 "ath12k Beacon stats", num_bcn);
7145 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7146 			 "===================");
7147 
7148 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7149 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7150 		if (!arvif)
7151 			continue;
7152 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7153 				 "VDEV ID", bcn->vdev_id);
7154 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7155 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7156 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7157 				 "================");
7158 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7159 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7160 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7161 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7162 
7163 		len += scnprintf(buf + len, buf_len - len, "\n");
7164 		*length = len;
7165 	}
7166 }
7167 
7168 static void
7169 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7170 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7171 {
7172 	u32 len = *length;
7173 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7174 
7175 	len += scnprintf(buf + len, buf_len - len, "\n");
7176 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7177 			"ath12k PDEV stats");
7178 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7179 			"=================");
7180 
7181 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7182 			"Channel noise floor", pdev->ch_noise_floor);
7183 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7184 			"Channel TX power", pdev->chan_tx_power);
7185 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7186 			"TX frame count", pdev->tx_frame_count);
7187 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7188 			"RX frame count", pdev->rx_frame_count);
7189 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7190 			"RX clear count", pdev->rx_clear_count);
7191 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7192 			"Cycle count", pdev->cycle_count);
7193 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7194 			"PHY error count", pdev->phy_err_count);
7195 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7196 			"soc drop count", fw_soc_drop_cnt);
7197 
7198 	*length = len;
7199 }
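
/* A minimal sketch of the scnprintf() accumulation idiom used by all of
 * the *_stats_dump() helpers: scnprintf() returns the number of bytes
 * actually written (excluding the trailing NUL), so "len" can never
 * advance past buf_len and each call appends safely after the previous
 * output. Hypothetical helper:
 */
static u32 ath12k_sketch_stats_append(char *buf, u32 buf_len, u32 len,
				      const char *line)
{
	return len + scnprintf(buf + len, buf_len - len, "%s\n", line);
}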
7200 
7201 static void
7202 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7203 				 char *buf, u32 *length)
7204 {
7205 	u32 len = *length;
7206 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7207 
7208 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7209 			 "ath12k PDEV TX stats");
7210 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7211 			 "====================");
7212 
7213 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7214 			 "HTT cookies queued", pdev->comp_queued);
7215 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7216 			 "HTT cookies disp.", pdev->comp_delivered);
7217 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7218 			 "MSDU queued", pdev->msdu_enqued);
7219 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7220 			 "MPDU queued", pdev->mpdu_enqued);
7221 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7222 			 "MSDUs dropped", pdev->wmm_drop);
7223 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7224 			 "Local enqued", pdev->local_enqued);
7225 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7226 			 "Local freed", pdev->local_freed);
7227 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7228 			 "HW queued", pdev->hw_queued);
7229 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7230 			 "PPDUs reaped", pdev->hw_reaped);
7231 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7232 			 "Num underruns", pdev->underrun);
7233 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7234 			 "PPDUs cleaned", pdev->tx_abort);
7235 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7236 			 "MPDUs requeued", pdev->mpdus_requed);
7237 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7238 			 "Excessive retries", pdev->tx_ko);
7239 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7240 			 "HW rate", pdev->data_rc);
7241 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7242 			 "Sched self triggers", pdev->self_triggers);
7243 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7244 			 "Dropped due to SW retries",
7245 			 pdev->sw_retry_failure);
7246 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7247 			 "Illegal rate phy errors",
7248 			 pdev->illgl_rate_phy_err);
7249 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7250 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7251 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7252 			 "TX timeout", pdev->pdev_tx_timeout);
7253 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7254 			 "PDEV resets", pdev->pdev_resets);
7255 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7256 			 "Stateless TIDs alloc failures",
7257 			 pdev->stateless_tid_alloc_failure);
7258 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7259 			 "PHY underrun", pdev->phy_underrun);
7260 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7261 			 "MPDU is more than txop limit", pdev->txop_ovf);
7262 	*length = len;
7263 }
7264 
7265 static void
7266 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7267 				 char *buf, u32 *length)
7268 {
7269 	u32 len = *length;
7270 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7271 
7272 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7273 			 "ath12k PDEV RX stats");
7274 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7275 			 "====================");
7276 
7277 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7278 			 "Mid PPDU route change",
7279 			 pdev->mid_ppdu_route_change);
7280 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7281 			 "Tot. number of statuses", pdev->status_rcvd);
7282 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7283 			 "Extra frags on rings 0", pdev->r0_frags);
7284 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7285 			 "Extra frags on rings 1", pdev->r1_frags);
7286 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7287 			 "Extra frags on rings 2", pdev->r2_frags);
7288 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7289 			 "Extra frags on rings 3", pdev->r3_frags);
7290 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7291 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7292 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7293 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7294 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7295 			 "MSDUs delivered to stack", pdev->loc_msdus);
7296 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7297 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7298 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7299 			 "Oversized AMSUs", pdev->oversize_amsdu);
7300 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7301 			 "PHY errors", pdev->phy_errs);
7302 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7303 			 "PHY errors drops", pdev->phy_err_drop);
7304 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7305 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7306 	*length = len;
7307 }
7308 
7309 static void
7310 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7311 			      struct ath12k_fw_stats *fw_stats,
7312 			      char *buf, u32 *length)
7313 {
7314 	const struct ath12k_fw_stats_pdev *pdev;
7315 	u32 len = *length;
7316 
7317 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7318 					struct ath12k_fw_stats_pdev, list);
7319 	if (!pdev) {
7320 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7321 		return;
7322 	}
7323 
7324 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7325 					   ar->ab->fw_soc_drop_count);
7326 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7327 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7328 
7329 	*length = len;
7330 }
7331 
7332 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7333 			      struct ath12k_fw_stats *fw_stats,
7334 			      u32 stats_id, char *buf)
7335 {
7336 	u32 len = 0;
7337 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7338 
7339 	spin_lock_bh(&ar->data_lock);
7340 
7341 	switch (stats_id) {
7342 	case WMI_REQUEST_VDEV_STAT:
7343 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7344 		break;
7345 	case WMI_REQUEST_BCN_STAT:
7346 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7347 		break;
7348 	case WMI_REQUEST_PDEV_STAT:
7349 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7350 		break;
7351 	default:
7352 		break;
7353 	}
7354 
7355 	spin_unlock_bh(&ar->data_lock);
7356 
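	/* Ensure the buffer is always NUL-terminated: if the dumps filled it
	 * completely, terminate in place at the last byte; otherwise
	 * terminate right after the generated output.
	 */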
7357 	if (len >= buf_len)
7358 		buf[len - 1] = 0;
7359 	else
7360 		buf[len] = 0;
7361 
7362 	ath12k_fw_stats_reset(ar);
7363 }
7364 
7365 static void
7366 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7367 			   struct ath12k_fw_stats_vdev *dst)
7368 {
7369 	int i;
7370 
7371 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7372 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7373 	dst->data_snr = le32_to_cpu(src->data_snr);
7374 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7375 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7376 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7377 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7378 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7379 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7380 
7381 	for (i = 0; i < WLAN_MAX_AC; i++)
7382 		dst->num_tx_frames[i] =
7383 			le32_to_cpu(src->num_tx_frames[i]);
7384 
7385 	for (i = 0; i < WLAN_MAX_AC; i++)
7386 		dst->num_tx_frames_retries[i] =
7387 			le32_to_cpu(src->num_tx_frames_retries[i]);
7388 
7389 	for (i = 0; i < WLAN_MAX_AC; i++)
7390 		dst->num_tx_frames_failures[i] =
7391 			le32_to_cpu(src->num_tx_frames_failures[i]);
7392 
7393 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7394 		dst->tx_rate_history[i] =
7395 			le32_to_cpu(src->tx_rate_history[i]);
7396 
7397 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7398 		dst->beacon_rssi_history[i] =
7399 			le32_to_cpu(src->beacon_rssi_history[i]);
7400 }
7401 
7402 static void
7403 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7404 			  struct ath12k_fw_stats_bcn *dst)
7405 {
7406 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7407 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7408 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7409 }
7410 
7411 static void
7412 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7413 				struct ath12k_fw_stats_pdev *dst)
7414 {
7415 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7416 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7417 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7418 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7419 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7420 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7421 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7422 }
7423 
7424 static void
7425 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
7426 			      struct ath12k_fw_stats_pdev *dst)
7427 {
7428 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
7429 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
7430 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
7431 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
7432 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
7433 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
7434 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
7435 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
7436 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
7437 	dst->underrun = a_sle32_to_cpu(src->underrun);
7438 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
7439 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
7440 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
7441 	dst->data_rc = __le32_to_cpu(src->data_rc);
7442 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
7443 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
7444 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
7445 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
7446 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
7447 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
7448 	dst->stateless_tid_alloc_failure =
7449 		__le32_to_cpu(src->stateless_tid_alloc_failure);
7450 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
7451 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
7452 }
7453 
7454 static void
7455 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
7456 			      struct ath12k_fw_stats_pdev *dst)
7457 {
7458 	dst->mid_ppdu_route_change =
7459 		a_sle32_to_cpu(src->mid_ppdu_route_change);
7460 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
7461 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
7462 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
7463 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
7464 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
7465 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
7466 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
7467 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
7468 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
7469 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
7470 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
7471 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
7472 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
7473 }
7474 
7475 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
7476 					      struct wmi_tlv_fw_stats_parse *parse,
7477 					      const void *ptr,
7478 					      u16 len)
7479 {
7480 	const struct wmi_stats_event *ev = parse->ev;
7481 	struct ath12k_fw_stats *stats = parse->stats;
7482 	struct ath12k *ar;
7483 	struct ath12k_link_vif *arvif;
7484 	struct ieee80211_sta *sta;
7485 	struct ath12k_sta *ahsta;
7486 	struct ath12k_link_sta *arsta;
7487 	int i, ret = 0;
7488 	const void *data = ptr;
7489 
7490 	if (!ev) {
7491 		ath12k_warn(ab, "failed to fetch update stats ev");
7492 		return -EPROTO;
7493 	}
7494 
7495 	if (!stats)
7496 		return -EINVAL;
7497 
7498 	rcu_read_lock();
7499 
7500 	stats->pdev_id = le32_to_cpu(ev->pdev_id);
7501 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
7502 	if (!ar) {
7503 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
7504 			    le32_to_cpu(ev->pdev_id));
7505 		ret = -EPROTO;
7506 		goto exit;
7507 	}
7508 
7509 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
7510 		const struct wmi_vdev_stats_params *src;
7511 		struct ath12k_fw_stats_vdev *dst;
7512 
7513 		src = data;
7514 		if (len < sizeof(*src)) {
7515 			ret = -EPROTO;
7516 			goto exit;
7517 		}
7518 
7519 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
7520 		if (arvif) {
7521 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7522 							   arvif->bssid,
7523 							   NULL);
7524 			if (sta) {
7525 				ahsta = ath12k_sta_to_ahsta(sta);
7526 				arsta = &ahsta->deflink;
7527 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
7528 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7529 					   "wmi stats vdev id %d snr %d\n",
7530 					   src->vdev_id, src->beacon_snr);
7531 			} else {
7532 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7533 					   "station not found for bssid %pM in vdev stat\n",
7534 					   arvif->bssid);
7535 			}
7536 		}
7537 
7538 		data += sizeof(*src);
7539 		len -= sizeof(*src);
7540 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7541 		if (!dst)
7542 			continue;
7543 		ath12k_wmi_pull_vdev_stats(src, dst);
7544 		stats->stats_id = WMI_REQUEST_VDEV_STAT;
7545 		list_add_tail(&dst->list, &stats->vdevs);
7546 	}
7547 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
7548 		const struct ath12k_wmi_bcn_stats_params *src;
7549 		struct ath12k_fw_stats_bcn *dst;
7550 
7551 		src = data;
7552 		if (len < sizeof(*src)) {
7553 			ret = -EPROTO;
7554 			goto exit;
7555 		}
7556 
7557 		data += sizeof(*src);
7558 		len -= sizeof(*src);
7559 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7560 		if (!dst)
7561 			continue;
7562 		ath12k_wmi_pull_bcn_stats(src, dst);
7563 		stats->stats_id = WMI_REQUEST_BCN_STAT;
7564 		list_add_tail(&dst->list, &stats->bcn);
7565 	}
7566 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
7567 		const struct ath12k_wmi_pdev_stats_params *src;
7568 		struct ath12k_fw_stats_pdev *dst;
7569 
7570 		src = data;
7571 		if (len < sizeof(*src)) {
7572 			ret = -EPROTO;
7573 			goto exit;
7574 		}
7575 
7576 		stats->stats_id = WMI_REQUEST_PDEV_STAT;
7577 
7578 		data += sizeof(*src);
7579 		len -= sizeof(*src);
7580 
7581 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7582 		if (!dst)
7583 			continue;
7584 
7585 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
7586 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
7587 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
7588 		list_add_tail(&dst->list, &stats->pdevs);
7589 	}
7590 
7591 exit:
7592 	rcu_read_unlock();
7593 	return ret;
7594 }
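
/* A minimal sketch (hypothetical stand-alone form) of the record walk
 * used above for the vdev/bcn/pdev stats: each fixed-size record is
 * validated against the remaining TLV length before the cursor advances,
 * so a truncated buffer fails with -EPROTO instead of being over-read.
 */
static int ath12k_sketch_walk_records(const void *data, u16 len,
				      size_t rec_sz, u32 num_recs)
{
	u32 i;

	for (i = 0; i < num_recs; i++) {
		if (len < rec_sz)
			return -EPROTO;

		data += rec_sz;
		len -= rec_sz;
	}

	return 0;
}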
7595 
7596 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
7597 					 u16 tag, u16 len,
7598 					 const void *ptr, void *data)
7599 {
7600 	struct wmi_tlv_fw_stats_parse *parse = data;
7601 	int ret = 0;
7602 
7603 	switch (tag) {
7604 	case WMI_TAG_STATS_EVENT:
7605 		parse->ev = ptr;
7606 		break;
7607 	case WMI_TAG_ARRAY_BYTE:
7608 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
7609 		break;
7610 	default:
7611 		break;
7612 	}
7613 	return ret;
7614 }
7615 
7616 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
7617 				    struct ath12k_fw_stats *stats)
7618 {
7619 	struct wmi_tlv_fw_stats_parse parse = {};
7620 
7621 	stats->stats_id = 0;
7622 	parse.stats = stats;
7623 
7624 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7625 				   ath12k_wmi_tlv_fw_stats_parse,
7626 				   &parse);
7627 }
7628 
7629 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
7630 {
7631 	struct ath12k_fw_stats stats = {};
7632 	struct ath12k *ar;
7633 	int ret;
7634 
7635 	INIT_LIST_HEAD(&stats.pdevs);
7636 	INIT_LIST_HEAD(&stats.vdevs);
7637 	INIT_LIST_HEAD(&stats.bcn);
7638 
7639 	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
7640 	if (ret) {
7641 		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
7642 		goto free;
7643 	}
7644 
7645 	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
7646 
7647 	rcu_read_lock();
7648 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
7649 	if (!ar) {
7650 		rcu_read_unlock();
7651 		ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
7652 			    stats.pdev_id);
7653 		goto free;
7654 	}
7655 
7656 	spin_lock_bh(&ar->data_lock);
7657 
7658 	/* WMI_REQUEST_PDEV_STAT can be requested via the .get_txpower mac ops
7659 	 * or via debugfs fw stats, so it is processed separately here.
7660 	 */
7661 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
7662 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
7663 		ar->fw_stats.fw_stats_done = true;
7664 		goto complete;
7665 	}
7666 
7667 	/* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested
7668 	 * only via debugfs fw stats, hence these are processed in debugfs context.
7669 	 */
7670 	ath12k_debugfs_fw_stats_process(ar, &stats);
7671 
7672 complete:
7673 	complete(&ar->fw_stats_complete);
7674 	spin_unlock_bh(&ar->data_lock);
7675 	rcu_read_unlock();
7676 
7677 	/* Since the stats' pdev, vdev and beacon lists are spliced and
7678 	 * reinitialised at this point, there is no need to free the individual lists.
7679 	 */
7680 	return;
7681 
7682 free:
7683 	ath12k_fw_stats_free(&stats);
7684 }
7685 
7686 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from the FW when the scanned
7687  * frequency is not part of the BDF CTL (Conformance Test Limits) table entries.
7688  */
7689 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
7690 						 struct sk_buff *skb)
7691 {
7692 	const void **tb;
7693 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
7694 	int ret;
7695 
7696 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7697 	if (IS_ERR(tb)) {
7698 		ret = PTR_ERR(tb);
7699 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7700 		return;
7701 	}
7702 
7703 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
7704 	if (!ev) {
7705 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
7706 		kfree(tb);
7707 		return;
7708 	}
7709 
7710 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7711 		   "pdev ctl failsafe check ev status %d\n",
7712 		   ev->ctl_failsafe_status);
7713 
7714 	/* If ctl_failsafe_status is set to 1, the FW caps the transmit power
7715 	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
7716 	 */
7717 	if (ev->ctl_failsafe_status != 0)
7718 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
7719 			    ev->ctl_failsafe_status);
7720 
7721 	kfree(tb);
7722 }
7723 
7724 static void
7725 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
7726 					  const struct ath12k_wmi_pdev_csa_event *ev,
7727 					  const u32 *vdev_ids)
7728 {
7729 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
7730 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
7731 	struct ieee80211_bss_conf *conf;
7732 	struct ath12k_link_vif *arvif;
7733 	struct ath12k_vif *ahvif;
7734 	int i;
7735 
7736 	rcu_read_lock();
7737 	for (i = 0; i < num_vdevs; i++) {
7738 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
7739 
7740 		if (!arvif) {
7741 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
7742 				    vdev_ids[i]);
7743 			continue;
7744 		}
7745 		ahvif = arvif->ahvif;
7746 
7747 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
7748 			ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
7749 				    arvif->link_id);
7750 			continue;
7751 		}
7752 
7753 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
7754 		if (!conf) {
7755 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
7756 				    ahvif->vif->addr, arvif->link_id);
7757 			continue;
7758 		}
7759 
7760 		if (!arvif->is_up || !conf->csa_active)
7761 			continue;
7762 
7763 		/* Finish CSA when counter reaches zero */
7764 		if (!current_switch_count) {
7765 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
7766 			arvif->current_cntdown_counter = 0;
7767 		} else if (current_switch_count > 1) {
7768 			/* If the count in the event is not what we expect, don't
7769 			 * update the mac80211 count: on beacon Tx failure the
7770 			 * firmware count does not decrement, so this event can
7771 			 * arrive carrying the previous count value again.
7772 			 */
7773 			if (current_switch_count != arvif->current_cntdown_counter)
7774 				continue;
7775 
7776 			arvif->current_cntdown_counter =
7777 				ieee80211_beacon_update_cntdwn(ahvif->vif,
7778 							       arvif->link_id);
7779 		}
7780 	}
7781 	rcu_read_unlock();
7782 }
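
/* A worked example of the countdown handling above, with hypothetical
 * values: if arvif->current_cntdown_counter is 5, an event reporting 5
 * updates mac80211 and stores the decremented count; an event reporting
 * 6 (a stale repeat after a beacon tx failure) is ignored; and an event
 * reporting 0 finishes the CSA via ieee80211_csa_finish().
 */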
7783 
7784 static void
7785 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
7786 					      struct sk_buff *skb)
7787 {
7788 	const void **tb;
7789 	const struct ath12k_wmi_pdev_csa_event *ev;
7790 	const u32 *vdev_ids;
7791 	int ret;
7792 
7793 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7794 	if (IS_ERR(tb)) {
7795 		ret = PTR_ERR(tb);
7796 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7797 		return;
7798 	}
7799 
7800 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
7801 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
7802 
7803 	if (!ev || !vdev_ids) {
7804 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
7805 		kfree(tb);
7806 		return;
7807 	}
7808 
7809 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7810 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
7811 		   ev->current_switch_count, ev->pdev_id,
7812 		   ev->num_vdevs);
7813 
7814 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
7815 
7816 	kfree(tb);
7817 }
7818 
7819 static void
7820 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
7821 {
7822 	const void **tb;
7823 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
7824 	const struct ath12k_wmi_pdev_radar_event *ev;
7825 	struct ath12k *ar;
7826 	int ret;
7827 
7828 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7829 	if (IS_ERR(tb)) {
7830 		ret = PTR_ERR(tb);
7831 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7832 		return;
7833 	}
7834 
7835 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
7836 
7837 	if (!ev) {
7838 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
7839 		kfree(tb);
7840 		return;
7841 	}
7842 
7843 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7844 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
7845 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
7846 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
7847 		   ev->freq_offset, ev->sidx);
7848 
7849 	rcu_read_lock();
7850 
7851 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
7852 
7853 	if (!ar) {
7854 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
7855 			    ev->pdev_id);
7856 		goto exit;
7857 	}
7858 
7859 	arg.ar = ar;
7860 	arg.chanctx_conf = NULL;
7861 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
7862 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
7863 	if (!arg.chanctx_conf) {
7864 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
7865 		goto exit;
7866 	}
7867 
7868 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
7869 		   ev->pdev_id);
7870 
7871 	if (ar->dfs_block_radar_events)
7872 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
7873 	else
7874 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
7875 
7876 exit:
7877 	rcu_read_unlock();
7878 
7879 	kfree(tb);
7880 }
7881 
7882 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
7883 					  struct sk_buff *skb)
7884 {
7885 	const struct ath12k_wmi_ftm_event *ev;
7886 	const void **tb;
7887 	int ret;
7888 	u16 length;
7889 
7890 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7891 
7892 	if (IS_ERR(tb)) {
7893 		ret = PTR_ERR(tb);
7894 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
7895 		return;
7896 	}
7897 
7898 	ev = tb[WMI_TAG_ARRAY_BYTE];
7899 	if (!ev) {
7900 		ath12k_warn(ab, "failed to fetch ftm msg\n");
7901 		kfree(tb);
7902 		return;
7903 	}
7904 
7905 	length = skb->len - TLV_HDR_SIZE;
7906 	ath12k_tm_process_event(ab, cmd_id, ev, length);
7907 	kfree(tb);
7908 	tb = NULL;
7909 }
7910 
7911 static void
7912 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
7913 				  struct sk_buff *skb)
7914 {
7915 	struct ath12k *ar;
7916 	struct wmi_pdev_temperature_event ev = {0};
7917 
7918 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
7919 		ath12k_warn(ab, "failed to extract pdev temperature event");
7920 		return;
7921 	}
7922 
7923 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7924 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
7925 
7926 	rcu_read_lock();
7927 
7928 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
7929 	if (!ar) {
7930 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
7931 		goto exit;
7932 	}
7933 
7934 exit:
7935 	rcu_read_unlock();
7936 }
7937 
7938 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
7939 					struct sk_buff *skb)
7940 {
7941 	const void **tb;
7942 	const struct wmi_fils_discovery_event *ev;
7943 	int ret;
7944 
7945 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7946 	if (IS_ERR(tb)) {
7947 		ret = PTR_ERR(tb);
7948 		ath12k_warn(ab,
7949 			    "failed to parse FILS discovery event tlv %d\n",
7950 			    ret);
7951 		return;
7952 	}
7953 
7954 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
7955 	if (!ev) {
7956 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
7957 		kfree(tb);
7958 		return;
7959 	}
7960 
7961 	ath12k_warn(ab,
7962 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
7963 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
7964 
7965 	kfree(tb);
7966 }
7967 
7968 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
7969 					      struct sk_buff *skb)
7970 {
7971 	const void **tb;
7972 	const struct wmi_probe_resp_tx_status_event *ev;
7973 	int ret;
7974 
7975 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7976 	if (IS_ERR(tb)) {
7977 		ret = PTR_ERR(tb);
7978 		ath12k_warn(ab,
7979 			    "failed to parse probe response transmission status event tlv: %d\n",
7980 			    ret);
7981 		return;
7982 	}
7983 
7984 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
7985 	if (!ev) {
7986 		ath12k_warn(ab,
7987 			    "failed to fetch probe response transmission status event");
7988 		kfree(tb);
7989 		return;
7990 	}
7991 
7992 	if (ev->tx_status)
7993 		ath12k_warn(ab,
7994 			    "Probe response transmission failed for vdev_id %u, status %u\n",
7995 			    ev->vdev_id, ev->tx_status);
7996 
7997 	kfree(tb);
7998 }
7999 
8000 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
8001 				    struct sk_buff *skb)
8002 {
8003 	const void **tb;
8004 	const struct wmi_p2p_noa_event *ev;
8005 	const struct ath12k_wmi_p2p_noa_info *noa;
8006 	struct ath12k *ar;
8007 	int ret, vdev_id;
8008 
8009 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8010 	if (IS_ERR(tb)) {
8011 		ret = PTR_ERR(tb);
8012 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
8013 		return ret;
8014 	}
8015 
8016 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
8017 	noa = tb[WMI_TAG_P2P_NOA_INFO];
8018 
8019 	if (!ev || !noa) {
8020 		ret = -EPROTO;
8021 		goto out;
8022 	}
8023 
8024 	vdev_id = __le32_to_cpu(ev->vdev_id);
8025 
8026 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8027 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
8028 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
8029 
8030 	rcu_read_lock();
8031 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
8032 	if (!ar) {
8033 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
8034 			    vdev_id);
8035 		ret = -EINVAL;
8036 		goto unlock;
8037 	}
8038 
8039 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
8040 
8041 	ret = 0;
8042 
8043 unlock:
8044 	rcu_read_unlock();
8045 out:
8046 	kfree(tb);
8047 	return ret;
8048 }
8049 
8050 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
8051 					     struct sk_buff *skb)
8052 {
8053 	const struct wmi_rfkill_state_change_event *ev;
8054 	const void **tb;
8055 	int ret;
8056 
8057 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8058 	if (IS_ERR(tb)) {
8059 		ret = PTR_ERR(tb);
8060 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8061 		return;
8062 	}
8063 
8064 	ev = tb[WMI_TAG_RFKILL_EVENT];
8065 	if (!ev) {
8066 		kfree(tb);
8067 		return;
8068 	}
8069 
8070 	ath12k_dbg(ab, ATH12K_DBG_MAC,
8071 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
8072 		   le32_to_cpu(ev->gpio_pin_num),
8073 		   le32_to_cpu(ev->int_type),
8074 		   le32_to_cpu(ev->radio_state));
8075 
8076 	spin_lock_bh(&ab->base_lock);
8077 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
8078 	spin_unlock_bh(&ab->base_lock);
8079 
8080 	queue_work(ab->workqueue, &ab->rfkill_work);
8081 	kfree(tb);
8082 }
8083 
8084 static void
8085 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
8086 {
8087 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
8088 }
8089 
8090 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
8091 					struct sk_buff *skb)
8092 {
8093 	const void **tb;
8094 	const struct wmi_twt_enable_event *ev;
8095 	int ret;
8096 
8097 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8098 	if (IS_ERR(tb)) {
8099 		ret = PTR_ERR(tb);
8100 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
8101 			    ret);
8102 		return;
8103 	}
8104 
8105 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
8106 	if (!ev) {
8107 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
8108 		goto exit;
8109 	}
8110 
8111 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
8112 		   le32_to_cpu(ev->pdev_id),
8113 		   le32_to_cpu(ev->status));
8114 
8115 exit:
8116 	kfree(tb);
8117 }
8118 
8119 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
8120 					 struct sk_buff *skb)
8121 {
8122 	const void **tb;
8123 	const struct wmi_twt_disable_event *ev;
8124 	int ret;
8125 
8126 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8127 	if (IS_ERR(tb)) {
8128 		ret = PTR_ERR(tb);
8129 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
8130 			    ret);
8131 		return;
8132 	}
8133 
8134 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
8135 	if (!ev) {
8136 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
8137 		goto exit;
8138 	}
8139 
8140 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
8141 		   le32_to_cpu(ev->pdev_id),
8142 		   le32_to_cpu(ev->status));
8143 
8144 exit:
8145 	kfree(tb);
8146 }
8147 
8148 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
8149 					    u16 tag, u16 len,
8150 					    const void *ptr, void *data)
8151 {
8152 	const struct wmi_wow_ev_pg_fault_param *pf_param;
8153 	const struct wmi_wow_ev_param *param;
8154 	struct wmi_wow_ev_arg *arg = data;
8155 	int pf_len;
8156 
8157 	switch (tag) {
8158 	case WMI_TAG_WOW_EVENT_INFO:
8159 		param = ptr;
8160 		arg->wake_reason = le32_to_cpu(param->wake_reason);
8161 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
8162 			   arg->wake_reason, wow_reason(arg->wake_reason));
8163 		break;
8164 
8165 	case WMI_TAG_ARRAY_BYTE:
8166 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
8167 			pf_param = ptr;
8168 			pf_len = le32_to_cpu(pf_param->len);
8169 			if (pf_len > len - sizeof(pf_len) ||
8170 			    pf_len < 0) {
8171 				ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
8172 					    pf_len);
8173 				return -EINVAL;
8174 			}
8175 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
8176 				   pf_len);
8177 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
8178 					"wow_reason_page_fault packet present",
8179 					"wow_pg_fault ",
8180 					pf_param->data,
8181 					pf_len);
8182 		}
8183 		break;
8184 	default:
8185 		break;
8186 	}
8187 
8188 	return 0;
8189 }
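
/* The page-fault bounds check above assumes a TLV layout of a 32-bit
 * length field immediately followed by the payload it describes,
 * roughly:
 *
 *	struct wmi_wow_ev_pg_fault_param {
 *		__le32 len;
 *		u8 data[];
 *	} __packed;
 *
 * so the payload length must fit within the TLV bytes remaining after
 * the length field itself.
 */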
8190 
8191 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
8192 {
8193 	struct wmi_wow_ev_arg arg = { };
8194 	int ret;
8195 
8196 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8197 				  ath12k_wmi_wow_wakeup_host_parse,
8198 				  &arg);
8199 	if (ret) {
8200 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8201 			    ret);
8202 		return;
8203 	}
8204 
8205 	complete(&ab->wow.wakeup_completed);
8206 }
8207 
8208 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8209 						struct sk_buff *skb)
8210 {
8211 	const struct wmi_gtk_offload_status_event *ev;
8212 	struct ath12k_link_vif *arvif;
8213 	__be64 replay_ctr_be;
8214 	u64 replay_ctr;
8215 	const void **tb;
8216 	int ret;
8217 
8218 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8219 	if (IS_ERR(tb)) {
8220 		ret = PTR_ERR(tb);
8221 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8222 		return;
8223 	}
8224 
8225 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8226 	if (!ev) {
8227 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8228 		kfree(tb);
8229 		return;
8230 	}
8231 
8232 	rcu_read_lock();
8233 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8234 	if (!arvif) {
8235 		rcu_read_unlock();
8236 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8237 			    le32_to_cpu(ev->vdev_id));
8238 		kfree(tb);
8239 		return;
8240 	}
8241 
8242 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8243 	arvif->rekey_data.replay_ctr = replay_ctr;
8244 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8245 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8246 
8247 	/* supplicant expects big-endian replay counter */
8248 	replay_ctr_be = cpu_to_be64(replay_ctr);
8249 
8250 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8251 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8252 
8253 	rcu_read_unlock();
8254 
8255 	kfree(tb);
8256 }
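
/* Note on the byte order above: the firmware reports the GTK replay
 * counter as a little-endian 64-bit value, while the supplicant notified
 * via ieee80211_gtk_rekey_notify() expects the eight counter bytes in
 * big-endian order, hence the le64_to_cpu()/cpu_to_be64() round trip.
 */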
8257 
8258 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8259 						struct sk_buff *skb)
8260 {
8261 	const struct wmi_mlo_setup_complete_event *ev;
8262 	struct ath12k *ar = NULL;
8263 	struct ath12k_pdev *pdev;
8264 	const void **tb;
8265 	int ret, i;
8266 
8267 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8268 	if (IS_ERR(tb)) {
8269 		ret = PTR_ERR(tb);
8270 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8271 			    ret);
8272 		return;
8273 	}
8274 
8275 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8276 	if (!ev) {
8277 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8278 		kfree(tb);
8279 		return;
8280 	}
8281 
8282 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8283 		goto skip_lookup;
8284 
8285 	for (i = 0; i < ab->num_radios; i++) {
8286 		pdev = &ab->pdevs[i];
8287 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8288 			ar = pdev->ar;
8289 			break;
8290 		}
8291 	}
8292 
8293 skip_lookup:
8294 	if (!ar) {
8295 		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
8296 			    ev->pdev_id, ev->status);
8297 		goto out;
8298 	}
8299 
8300 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8301 	complete(&ar->mlo_setup_done);
8302 
8303 out:
8304 	kfree(tb);
8305 }
8306 
8307 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8308 					       struct sk_buff *skb)
8309 {
8310 	const struct wmi_mlo_teardown_complete_event *ev;
8311 	const void **tb;
8312 	int ret;
8313 
8314 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8315 	if (IS_ERR(tb)) {
8316 		ret = PTR_ERR(tb);
8317 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8318 		return;
8319 	}
8320 
8321 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8322 	if (!ev) {
8323 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8324 		kfree(tb);
8325 		return;
8326 	}
8327 
8328 	kfree(tb);
8329 }
8330 
8331 #ifdef CONFIG_ATH12K_DEBUGFS
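/* TPC stats arrive as a sequence of HALPHY ctrl path events. Copy the raw
 * INT16/BYTE array carried by the current event into the destination
 * buffer that was allocated and sized from the matching fixed param TLV
 * received earlier in the sequence.
 */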
8332 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8333 					    const void *ptr, u16 tag, u16 len,
8334 					    struct wmi_tpc_stats_arg *tpc_stats)
8335 {
8336 	u32 len1, len2, len3, len4;
8337 	s16 *dst_ptr;
8338 	s8 *dst_ptr_ctl;
8339 
8340 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8341 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8342 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8343 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8344 
8345 	switch (tpc_stats->event_count) {
8346 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8347 		if (len1 > len)
8348 			return -ENOBUFS;
8349 
8350 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8351 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8352 			memcpy(dst_ptr, ptr, len1);
8353 		}
8354 		break;
8355 	case ATH12K_TPC_STATS_RATES_EVENT1:
8356 		if (len2 > len)
8357 			return -ENOBUFS;
8358 
8359 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8360 			dst_ptr = tpc_stats->rates_array1.rate_array;
8361 			memcpy(dst_ptr, ptr, len2);
8362 		}
8363 		break;
8364 	case ATH12K_TPC_STATS_RATES_EVENT2:
8365 		if (len3 > len)
8366 			return -ENOBUFS;
8367 
8368 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
8369 			dst_ptr = tpc_stats->rates_array2.rate_array;
8370 			memcpy(dst_ptr, ptr, len3);
8371 		}
8372 		break;
8373 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
8374 		if (len4 > len)
8375 			return -ENOBUFS;
8376 
8377 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
8378 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
8379 			memcpy(dst_ptr_ctl, ptr, len4);
8380 		}
8381 		break;
8382 	}
8383 	return 0;
8384 }
8385 
8386 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
8387 				  struct wmi_tpc_stats_arg *tpc_stats,
8388 				  struct wmi_max_reg_power_fixed_params *ev)
8389 {
8390 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
8391 	u32 total_size;
8392 
8393 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8394 		   "Received reg power array type %d length %d for tpc stats\n",
8395 		   le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
8396 
8397 	switch (le32_to_cpu(ev->reg_power_type)) {
8398 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
8399 		reg_pwr = &tpc_stats->max_reg_allowed_power;
8400 		break;
8401 	default:
8402 		return -EINVAL;
8403 	}
8404 
8405 	/* Each entry is 2 bytes, hence multiply the product of the dimensions by 2 */
8406 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8407 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
8408 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
8409 		ath12k_warn(ab,
8410 			    "Total size and reg_array_len do not match for tpc stats\n");
8411 		return -EINVAL;
8412 	}
8413 
8414 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
8415 
8416 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
8417 					 GFP_ATOMIC);
8418 	if (!reg_pwr->reg_pwr_array)
8419 		return -ENOMEM;
8420 
8421 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
8422 
8423 	return 0;
8424 }
8425 
8426 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
8427 				     struct wmi_tpc_stats_arg *tpc_stats,
8428 				     struct wmi_tpc_rates_array_fixed_params *ev)
8429 {
8430 	struct wmi_tpc_rates_array_arg *rates_array;
8431 	u32 flag = 0, rate_array_len;
8432 
8433 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8434 		   "Received rates array type %d length %d for tpc stats\n",
8435 		   le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
8436 
8437 	switch (le32_to_cpu(ev->rate_array_type)) {
8438 	case ATH12K_TPC_STATS_RATES_ARRAY1:
8439 		rates_array = &tpc_stats->rates_array1;
8440 		flag = WMI_TPC_RATES_ARRAY1;
8441 		break;
8442 	case ATH12K_TPC_STATS_RATES_ARRAY2:
8443 		rates_array = &tpc_stats->rates_array2;
8444 		flag = WMI_TPC_RATES_ARRAY2;
8445 		break;
8446 	default:
8447 		ath12k_warn(ab,
8448 			    "Received invalid type of rates array for tpc stats\n");
8449 		return -EINVAL;
8450 	}
8451 	memcpy(&rates_array->tpc_rates_array, ev,
8452 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
8453 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
8454 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
8455 	if (!rates_array->rate_array)
8456 		return -ENOMEM;
8457 
8458 	tpc_stats->tlvs_rcvd |= flag;
8459 	return 0;
8460 }
8461 
8462 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
8463 				      struct wmi_tpc_stats_arg *tpc_stats,
8464 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
8465 {
8466 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
8467 	u32 total_size, ctl_array_len, flag = 0;
8468 
8469 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8470 		   "Received ctl array type %d length %d for tpc stats\n",
8471 		   le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
8472 
8473 	switch (le32_to_cpu(ev->ctl_array_type)) {
8474 	case ATH12K_TPC_STATS_CTL_ARRAY:
8475 		ctl_array = &tpc_stats->ctl_array;
8476 		flag = WMI_TPC_CTL_PWR_ARRAY;
8477 		break;
8478 	default:
8479 		ath12k_warn(ab,
8480 			    "Received invalid type of ctl pwr table for tpc stats\n");
8481 		return -EINVAL;
8482 	}
8483 
8484 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8485 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
8486 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
8487 		ath12k_warn(ab,
8488 			    "Total size and ctl_array_len do not match for tpc stats\n");
8489 		return -EINVAL;
8490 	}
8491 
8492 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
8493 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
8494 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
8495 	if (!ctl_array->ctl_pwr_table)
8496 		return -ENOMEM;
8497 
8498 	tpc_stats->tlvs_rcvd |= flag;
8499 	return 0;
8500 }
8501 
8502 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
8503 					      u16 tag, u16 len,
8504 					      const void *ptr, void *data)
8505 {
8506 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
8507 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
8508 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
8509 	struct wmi_tpc_stats_arg *tpc_stats = data;
8510 	struct wmi_tpc_config_params *tpc_config;
8511 	int ret = 0;
8512 
8513 	if (!tpc_stats) {
8514 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8515 		return -EINVAL;
8516 	}
8517 
8518 	switch (tag) {
8519 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
8520 		tpc_config = (struct wmi_tpc_config_params *)ptr;
8521 		memcpy(&tpc_stats->tpc_config, tpc_config,
8522 		       sizeof(struct wmi_tpc_config_params));
8523 		break;
8524 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
8525 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
8526 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
8527 		break;
8528 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
8529 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
8530 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
8531 		break;
8532 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
8533 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
8534 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
8535 		break;
8536 	default:
8537 		ath12k_warn(ab,
8538 			    "Received invalid tag for tpc stats in subtlvs\n");
8539 		return -EINVAL;
8540 	}
8541 	return ret;
8542 }
8543 
8544 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
8545 					     u16 tag, u16 len,
8546 					     const void *ptr, void *data)
8547 {
8548 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
8549 	int ret;
8550 
8551 	switch (tag) {
8552 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
8553 		ret = 0;
8554 		/* Fixed param has already been processed */
8555 		break;
8556 	case WMI_TAG_ARRAY_STRUCT:
8557 		/* A len of 0 is expected for an array of struct when there
8558 		 * is no content of that type to pack inside that TLV.
8559 		 */
8560 		if (len == 0)
8561 			return 0;
8562 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
8563 					  ath12k_wmi_tpc_stats_subtlv_parser,
8564 					  tpc_stats);
8565 		break;
8566 	case WMI_TAG_ARRAY_INT16:
8567 		if (len == 0)
8568 			return 0;
8569 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8570 						       WMI_TAG_ARRAY_INT16,
8571 						       len, tpc_stats);
8572 		break;
8573 	case WMI_TAG_ARRAY_BYTE:
8574 		if (len == 0)
8575 			return 0;
8576 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8577 						       WMI_TAG_ARRAY_BYTE,
8578 						       len, tpc_stats);
8579 		break;
8580 	default:
8581 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
8582 		ret = -EINVAL;
8583 		break;
8584 	}
8585 	return ret;
8586 }
8587 
8588 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
8589 {
8590 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
8591 
8592 	lockdep_assert_held(&ar->data_lock);
8593 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
8594 	if (tpc_stats) {
8595 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
8596 		kfree(tpc_stats->rates_array1.rate_array);
8597 		kfree(tpc_stats->rates_array2.rate_array);
8598 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
8599 		kfree(tpc_stats);
8600 		ar->debug.tpc_stats = NULL;
8601 	}
8602 }
8603 
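/* TPC stats are delivered as a multi-event sequence: event_count 0 starts
 * a fresh collection, later events must arrive strictly in order, and
 * end_of_event signals the requester waiting on ar->debug.tpc_complete.
 */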
8604 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8605 					 struct sk_buff *skb)
8606 {
8607 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
8608 	struct wmi_tpc_stats_arg *tpc_stats;
8609 	const struct wmi_tlv *tlv;
8610 	void *ptr = skb->data;
8611 	struct ath12k *ar;
8612 	u16 tlv_tag;
8613 	u32 event_count;
8614 	int ret;
8615 
8616 	if (!skb->data) {
8617 		ath12k_warn(ab, "No data present in tpc stats event\n");
8618 		return;
8619 	}
8620 
8621 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
8622 		ath12k_warn(ab, "TPC stats event size invalid\n");
8623 		return;
8624 	}
8625 
8626 	tlv = (struct wmi_tlv *)ptr;
8627 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
8628 	ptr += sizeof(*tlv);
8629 
8630 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
8631 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
8632 		return;
8633 	}
8634 
8635 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
8636 	rcu_read_lock();
8637 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
8638 	if (!ar) {
8639 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
8640 		rcu_read_unlock();
8641 		return;
8642 	}
8643 	spin_lock_bh(&ar->data_lock);
8644 	if (!ar->debug.tpc_request) {
8645 		/* The event was received either without a request or after
8646 		 * the timeout; if memory is already allocated, free it.
8647 		 */
8648 		if (ar->debug.tpc_stats) {
8649 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
8650 			ath12k_wmi_free_tpc_stats_mem(ar);
8651 		}
8652 		goto unlock;
8653 	}
8654 
8655 	event_count = le32_to_cpu(fixed_param->event_count);
8656 	if (event_count == 0) {
8657 		if (ar->debug.tpc_stats) {
8658 			ath12k_warn(ab,
8659 				    "Invalid tpc memory present\n");
8660 			goto unlock;
8661 		}
8662 		ar->debug.tpc_stats =
8663 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
8664 				GFP_ATOMIC);
8665 		if (!ar->debug.tpc_stats) {
8666 			ath12k_warn(ab,
8667 				    "Failed to allocate memory for tpc stats\n");
8668 			goto unlock;
8669 		}
8670 	}
8671 
8672 	tpc_stats = ar->debug.tpc_stats;
8673 	if (!tpc_stats) {
8674 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8675 		goto unlock;
8676 	}
8677 
8678 	if (event_count != 0) {
8679 		if (event_count != tpc_stats->event_count + 1) {
8680 			ath12k_warn(ab,
8681 				    "Invalid tpc event received\n");
8682 			goto unlock;
8683 		}
8684 	}
8685 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
8686 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
8687 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
8688 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8689 		   "tpc stats event_count %d\n",
8690 		   tpc_stats->event_count);
8691 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8692 				  ath12k_wmi_tpc_stats_event_parser,
8693 				  tpc_stats);
8694 	if (ret) {
8695 		ath12k_wmi_free_tpc_stats_mem(ar);
8696 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
8697 		goto unlock;
8698 	}
8699 
8700 	if (tpc_stats->end_of_event)
8701 		complete(&ar->debug.tpc_complete);
8702 
8703 unlock:
8704 	spin_unlock_bh(&ar->data_lock);
8705 	rcu_read_unlock();
8706 }
8707 #else
8708 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8709 					 struct sk_buff *skb)
8710 {
8711 }
8712 #endif
8713 
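/* Main WMI event dispatcher: strip the command header, route the event to
 * its handler and free the skb, except for WMI_MGMT_RX_EVENTID whose
 * handler takes ownership of the skb.
 */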
8714 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
8715 {
8716 	struct wmi_cmd_hdr *cmd_hdr;
8717 	enum wmi_tlv_event_id id;
8718 
8719 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
8720 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
8721 
8722 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
8723 		goto out;
8724 
8725 	switch (id) {
8726 		/* Process all the WMI events here */
8727 	case WMI_SERVICE_READY_EVENTID:
8728 		ath12k_service_ready_event(ab, skb);
8729 		break;
8730 	case WMI_SERVICE_READY_EXT_EVENTID:
8731 		ath12k_service_ready_ext_event(ab, skb);
8732 		break;
8733 	case WMI_SERVICE_READY_EXT2_EVENTID:
8734 		ath12k_service_ready_ext2_event(ab, skb);
8735 		break;
8736 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
8737 		ath12k_reg_chan_list_event(ab, skb);
8738 		break;
8739 	case WMI_READY_EVENTID:
8740 		ath12k_ready_event(ab, skb);
8741 		break;
8742 	case WMI_PEER_DELETE_RESP_EVENTID:
8743 		ath12k_peer_delete_resp_event(ab, skb);
8744 		break;
8745 	case WMI_VDEV_START_RESP_EVENTID:
8746 		ath12k_vdev_start_resp_event(ab, skb);
8747 		break;
8748 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
8749 		ath12k_bcn_tx_status_event(ab, skb);
8750 		break;
8751 	case WMI_VDEV_STOPPED_EVENTID:
8752 		ath12k_vdev_stopped_event(ab, skb);
8753 		break;
8754 	case WMI_MGMT_RX_EVENTID:
8755 		ath12k_mgmt_rx_event(ab, skb);
8756 		/* mgmt_rx_event() owns the skb now! */
8757 		return;
8758 	case WMI_MGMT_TX_COMPLETION_EVENTID:
8759 		ath12k_mgmt_tx_compl_event(ab, skb);
8760 		break;
8761 	case WMI_SCAN_EVENTID:
8762 		ath12k_scan_event(ab, skb);
8763 		break;
8764 	case WMI_PEER_STA_KICKOUT_EVENTID:
8765 		ath12k_peer_sta_kickout_event(ab, skb);
8766 		break;
8767 	case WMI_ROAM_EVENTID:
8768 		ath12k_roam_event(ab, skb);
8769 		break;
8770 	case WMI_CHAN_INFO_EVENTID:
8771 		ath12k_chan_info_event(ab, skb);
8772 		break;
8773 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
8774 		ath12k_pdev_bss_chan_info_event(ab, skb);
8775 		break;
8776 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
8777 		ath12k_vdev_install_key_compl_event(ab, skb);
8778 		break;
8779 	case WMI_SERVICE_AVAILABLE_EVENTID:
8780 		ath12k_service_available_event(ab, skb);
8781 		break;
8782 	case WMI_PEER_ASSOC_CONF_EVENTID:
8783 		ath12k_peer_assoc_conf_event(ab, skb);
8784 		break;
8785 	case WMI_UPDATE_STATS_EVENTID:
8786 		ath12k_update_stats_event(ab, skb);
8787 		break;
8788 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
8789 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
8790 		break;
8791 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
8792 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
8793 		break;
8794 	case WMI_PDEV_TEMPERATURE_EVENTID:
8795 		ath12k_wmi_pdev_temperature_event(ab, skb);
8796 		break;
8797 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
8798 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
8799 		break;
8800 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
8801 		ath12k_fils_discovery_event(ab, skb);
8802 		break;
8803 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
8804 		ath12k_probe_resp_tx_status_event(ab, skb);
8805 		break;
8806 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
8807 		ath12k_rfkill_state_change_event(ab, skb);
8808 		break;
8809 	case WMI_TWT_ENABLE_EVENTID:
8810 		ath12k_wmi_twt_enable_event(ab, skb);
8811 		break;
8812 	case WMI_TWT_DISABLE_EVENTID:
8813 		ath12k_wmi_twt_disable_event(ab, skb);
8814 		break;
8815 	case WMI_P2P_NOA_EVENTID:
8816 		ath12k_wmi_p2p_noa_event(ab, skb);
8817 		break;
8818 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
8819 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
8820 		break;
8821 	case WMI_VDEV_DELETE_RESP_EVENTID:
8822 		ath12k_vdev_delete_resp_event(ab, skb);
8823 		break;
8824 	case WMI_DIAG_EVENTID:
8825 		ath12k_wmi_diag_event(ab, skb);
8826 		break;
8827 	case WMI_WOW_WAKEUP_HOST_EVENTID:
8828 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
8829 		break;
8830 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
8831 		ath12k_wmi_gtk_offload_status_event(ab, skb);
8832 		break;
8833 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
8834 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
8835 		break;
8836 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
8837 		ath12k_wmi_event_teardown_complete(ab, skb);
8838 		break;
8839 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
8840 		ath12k_wmi_process_tpc_stats(ab, skb);
8841 		break;
8842 	case WMI_11D_NEW_COUNTRY_EVENTID:
8843 		ath12k_reg_11d_new_cc_event(ab, skb);
8844 		break;
8845 	/* add unsupported events (rare) here */
8846 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
8847 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
8848 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
8849 		ath12k_dbg(ab, ATH12K_DBG_WMI,
8850 			   "ignoring unsupported event 0x%x\n", id);
8851 		break;
8852 	/* add unsupported events (frequent) here */
8853 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
8854 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
8855 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
8856 		/* debug output might flood the log, hence silently ignore (no-op) */
8857 		break;
8858 	case WMI_PDEV_UTF_EVENTID:
8859 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
8860 			ath12k_tm_wmi_event_segmented(ab, id, skb);
8861 		else
8862 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
8863 		break;
8864 	default:
8865 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
8866 		break;
8867 	}
8868 
8869 out:
8870 	dev_kfree_skb(skb);
8871 }
8872 
8873 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
8874 					   u32 pdev_idx)
8875 {
8876 	int status;
8877 	static const u32 svc_id[] = {
8878 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
8879 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
8880 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
8881 	};
8882 	struct ath12k_htc_svc_conn_req conn_req = {};
8883 	struct ath12k_htc_svc_conn_resp conn_resp = {};
8884 
8885 	/* these fields are the same for all service endpoints */
8886 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
8887 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
8888 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
8889 
8890 	/* connect to control service */
8891 	conn_req.service_id = svc_id[pdev_idx];
8892 
8893 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
8894 	if (status) {
8895 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
8896 			    status);
8897 		return status;
8898 	}
8899 
8900 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
8901 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
8902 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
8903 
8904 	return 0;
8905 }
8906 
8907 static int
8908 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
8909 			      struct wmi_unit_test_cmd ut_cmd,
8910 			      u32 *test_args)
8911 {
8912 	struct ath12k_wmi_pdev *wmi = ar->wmi;
8913 	struct wmi_unit_test_cmd *cmd;
8914 	struct sk_buff *skb;
8915 	struct wmi_tlv *tlv;
8916 	void *ptr;
8917 	u32 *ut_cmd_args;
8918 	int buf_len, arg_len;
8919 	int ret;
8920 	int i;
8921 
8922 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
8923 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
8924 
8925 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
8926 	if (!skb)
8927 		return -ENOMEM;
8928 
8929 	cmd = (struct wmi_unit_test_cmd *)skb->data;
8930 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
8931 						 sizeof(ut_cmd));
8932 
8933 	cmd->vdev_id = ut_cmd.vdev_id;
8934 	cmd->module_id = ut_cmd.module_id;
8935 	cmd->num_args = ut_cmd.num_args;
8936 	cmd->diag_token = ut_cmd.diag_token;
8937 
8938 	ptr = skb->data + sizeof(ut_cmd);
8939 
8940 	tlv = ptr;
8941 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
8942 
8943 	ptr += TLV_HDR_SIZE;
8944 
8945 	ut_cmd_args = ptr;
8946 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
8947 		ut_cmd_args[i] = test_args[i];
8948 
8949 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8950 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
8951 		   le32_to_cpu(cmd->module_id), le32_to_cpu(cmd->vdev_id),
8952 		   le32_to_cpu(cmd->num_args), le32_to_cpu(cmd->diag_token));
8953 
8954 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
8956 	if (ret) {
8957 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST_CMDID: %d\n",
8958 			    ret);
8959 		dev_kfree_skb(skb);
8960 	}
8961 
8962 	return ret;
8963 }
8964 
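/* Inject a simulated radar pulse through the DFS unit test interface;
 * requires a started AP vdev on this radio.
 */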
8965 int ath12k_wmi_simulate_radar(struct ath12k *ar)
8966 {
8967 	struct ath12k_link_vif *arvif;
8968 	u32 dfs_args[DFS_MAX_TEST_ARGS];
8969 	struct wmi_unit_test_cmd wmi_ut;
8970 	bool arvif_found = false;
8971 
8972 	list_for_each_entry(arvif, &ar->arvifs, list) {
8973 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
8974 			arvif_found = true;
8975 			break;
8976 		}
8977 	}
8978 
8979 	if (!arvif_found)
8980 		return -EINVAL;
8981 
8982 	dfs_args[DFS_TEST_CMDID] = 0;
8983 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
8984 	/* Currently segment_id (b0 - b1), chirp (b2) and
8985 	 * freq offset (b3 - b10) can be passed to the unit test. For
8986 	 * simulation purposes this can be set to 0, which is valid.
8987 	 */
8988 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
8989 
8990 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
8991 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
8992 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
8993 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
8994 
8995 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
8996 
8997 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
8998 }
8999 
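/* Request HALPHY ctrl path TPC stats for the current pdev; the response
 * arrives via WMI_HALPHY_STATS_CTRL_PATH_EVENTID and is handled by
 * ath12k_wmi_process_tpc_stats().
 */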
9000 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
9001 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
9002 {
9003 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
9004 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9005 	struct sk_buff *skb;
9006 	struct wmi_tlv *tlv;
9007 	__le32 *pdev_id;
9008 	u32 buf_len;
9009 	void *ptr;
9010 	int ret;
9011 
9012 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
9013 
9014 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9015 	if (!skb)
9016 		return -ENOMEM;
9017 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
9018 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
9019 						 sizeof(*cmd));
9020 
9021 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
9022 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
9023 	cmd->subid = cpu_to_le32(tpc_stats_type);
9024 
9025 	ptr = skb->data + sizeof(*cmd);
9026 
9027 	/* The TLV arrays below optionally follow the fixed param TLV structure:
9028 	 * 1. ARRAY_UINT32 pdev_ids[]
9029 	 *      If this array is present and of non-zero length, stats should
9030 	 *      only be provided from the pdevs identified in the array.
9031 	 * 2. ARRAY_UINT32 vdev_ids[]
9032 	 *      If this array is present and of non-zero length, stats should
9033 	 *      only be provided from the vdevs identified in the array.
9034 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[]
9035 	 *      If this array is present and of non-zero length, stats should
9036 	 *      only be provided from the peers with the MAC addresses specified
9037 	 *      in the array.
9038 	 */
9039 	tlv = ptr;
9040 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9041 	ptr += TLV_HDR_SIZE;
9042 
9043 	pdev_id = ptr;
9044 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
9045 	ptr += sizeof(*pdev_id);
9046 
9047 	tlv = ptr;
9048 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9049 	ptr += TLV_HDR_SIZE;
9050 
9051 	tlv = ptr;
9052 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
9053 	ptr += TLV_HDR_SIZE;
9054 
9055 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
9056 	if (ret) {
9057 		ath12k_warn(ar->ab,
9058 			    "failed to submit WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
9059 		dev_kfree_skb(skb);
9060 		return ret;
9061 	}
9062 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
9063 		   ar->pdev->pdev_id);
9064 
9065 	return ret;
9066 }
9067 
9068 int ath12k_wmi_connect(struct ath12k_base *ab)
9069 {
9070 	u32 i;
9071 	u8 wmi_ep_count;
9072 
9073 	wmi_ep_count = ab->htc.wmi_ep_count;
9074 	if (wmi_ep_count > ab->hw_params->max_radios)
9075 		return -EINVAL;
9076 
9077 	for (i = 0; i < wmi_ep_count; i++)
9078 		ath12k_connect_pdev_htc_service(ab, i);
9079 
9080 	return 0;
9081 }
9082 
9083 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
9084 {
9085 	if (WARN_ON(pdev_id >= MAX_RADIOS))
9086 		return;
9087 
9088 	/* TODO: Deinit any pdev specific wmi resource */
9089 }
9090 
9091 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
9092 			   u8 pdev_id)
9093 {
9094 	struct ath12k_wmi_pdev *wmi_handle;
9095 
9096 	if (pdev_id >= ab->hw_params->max_radios)
9097 		return -EINVAL;
9098 
9099 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
9100 
9101 	wmi_handle->wmi_ab = &ab->wmi_ab;
9102 
9103 	ab->wmi_ab.ab = ab;
9104 	/* TODO: Init remaining resource specific to pdev */
9105 
9106 	return 0;
9107 }
9108 
9109 int ath12k_wmi_attach(struct ath12k_base *ab)
9110 {
9111 	int ret;
9112 
9113 	ret = ath12k_wmi_pdev_attach(ab, 0);
9114 	if (ret)
9115 		return ret;
9116 
9117 	ab->wmi_ab.ab = ab;
9118 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9119 
9120 	/* It's overwritten when service_ext_ready is handled */
9121 	if (ab->hw_params->single_pdev_only)
9122 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9123 
9124 	/* TODO: Init remaining wmi soc resources required */
9125 	init_completion(&ab->wmi_ab.service_ready);
9126 	init_completion(&ab->wmi_ab.unified_ready);
9127 
9128 	return 0;
9129 }
9130 
9131 void ath12k_wmi_detach(struct ath12k_base *ab)
9132 {
9133 	int i;
9134 
9135 	/* TODO: Deinit wmi resource specific to SOC as required */
9136 
9137 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
9138 		ath12k_wmi_pdev_detach(ab, i);
9139 
9140 	ath12k_wmi_free_dbring_caps(ab);
9141 }
9142 
9143 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
9144 {
9145 	struct wmi_hw_data_filter_cmd *cmd;
9146 	struct sk_buff *skb;
9147 	int len;
9148 
9149 	len = sizeof(*cmd);
9150 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9152 	if (!skb)
9153 		return -ENOMEM;
9154 
9155 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
9156 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
9157 						 sizeof(*cmd));
9158 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9159 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
9160 
9161 	/* Set all modes in case of disable */
9162 	if (arg->enable)
9163 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
9164 	else
9165 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
9166 
9167 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9168 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
9169 		   arg->enable, arg->hw_filter_bitmap);
9170 
9171 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
9172 }
9173 
9174 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
9175 {
9176 	struct wmi_wow_host_wakeup_cmd *cmd;
9177 	struct sk_buff *skb;
9178 	size_t len;
9179 
9180 	len = sizeof(*cmd);
9181 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9182 	if (!skb)
9183 		return -ENOMEM;
9184 
9185 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
9186 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
9187 						 sizeof(*cmd));
9188 
9189 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
9190 
9191 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
9192 }
9193 
9194 int ath12k_wmi_wow_enable(struct ath12k *ar)
9195 {
9196 	struct wmi_wow_enable_cmd *cmd;
9197 	struct sk_buff *skb;
9198 	int len;
9199 
9200 	len = sizeof(*cmd);
9201 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9202 	if (!skb)
9203 		return -ENOMEM;
9204 
9205 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
9206 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
9207 						 sizeof(*cmd));
9208 
9209 	cmd->enable = cpu_to_le32(1);
9210 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
9211 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
9212 
9213 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
9214 }
9215 
9216 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
9217 				    enum wmi_wow_wakeup_event event,
9218 				    u32 enable)
9219 {
9220 	struct wmi_wow_add_del_event_cmd *cmd;
9221 	struct sk_buff *skb;
9222 	size_t len;
9223 
9224 	len = sizeof(*cmd);
9225 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9226 	if (!skb)
9227 		return -ENOMEM;
9228 
9229 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
9230 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
9231 						 sizeof(*cmd));
9232 	cmd->vdev_id = cpu_to_le32(vdev_id);
9233 	cmd->is_add = cpu_to_le32(enable);
9234 	cmd->event_bitmap = cpu_to_le32(1 << event);
9235 
9236 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
9237 		   wow_wakeup_event(event), enable, vdev_id);
9238 
9239 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
9240 }
9241 
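/* Install a WoW bitmap wake pattern. Only the bitmap TLV carries data;
 * empty placeholder TLVs are packed for the other pattern types
 * (ipv4/ipv6 sync, magic, info timeout) that the command layout expects,
 * followed by a single-u32 ratelimit interval TLV.
 */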
9242 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
9243 			       const u8 *pattern, const u8 *mask,
9244 			       int pattern_len, int pattern_offset)
9245 {
9246 	struct wmi_wow_add_pattern_cmd *cmd;
9247 	struct wmi_wow_bitmap_pattern_params *bitmap;
9248 	struct wmi_tlv *tlv;
9249 	struct sk_buff *skb;
9250 	void *ptr;
9251 	size_t len;
9252 
9253 	len = sizeof(*cmd) +
9254 	      sizeof(*tlv) +			/* array struct */
9255 	      sizeof(*bitmap) +			/* bitmap */
9256 	      sizeof(*tlv) +			/* empty ipv4 sync */
9257 	      sizeof(*tlv) +			/* empty ipv6 sync */
9258 	      sizeof(*tlv) +			/* empty magic */
9259 	      sizeof(*tlv) +			/* empty info timeout */
9260 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
9261 
9262 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9263 	if (!skb)
9264 		return -ENOMEM;
9265 
9266 	/* cmd */
9267 	ptr = skb->data;
9268 	cmd = ptr;
9269 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
9270 						 sizeof(*cmd));
9271 	cmd->vdev_id = cpu_to_le32(vdev_id);
9272 	cmd->pattern_id = cpu_to_le32(pattern_id);
9273 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9274 
9275 	ptr += sizeof(*cmd);
9276 
9277 	/* bitmap */
9278 	tlv = ptr;
9279 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
9280 
9281 	ptr += sizeof(*tlv);
9282 
9283 	bitmap = ptr;
9284 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
9285 						    sizeof(*bitmap));
9286 	memcpy(bitmap->patternbuf, pattern, pattern_len);
9287 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
9288 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
9289 	bitmap->pattern_len = cpu_to_le32(pattern_len);
9290 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
9291 	bitmap->pattern_id = cpu_to_le32(pattern_id);
9292 
9293 	ptr += sizeof(*bitmap);
9294 
9295 	/* ipv4 sync */
9296 	tlv = ptr;
9297 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9298 
9299 	ptr += sizeof(*tlv);
9300 
9301 	/* ipv6 sync */
9302 	tlv = ptr;
9303 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9304 
9305 	ptr += sizeof(*tlv);
9306 
9307 	/* magic */
9308 	tlv = ptr;
9309 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9310 
9311 	ptr += sizeof(*tlv);
9312 
9313 	/* pattern info timeout */
9314 	tlv = ptr;
9315 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9316 
9317 	ptr += sizeof(*tlv);
9318 
9319 	/* ratelimit interval */
9320 	tlv = ptr;
9321 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9322 
9323 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
9324 		   vdev_id, pattern_id, pattern_offset, pattern_len);
9325 
9326 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
9327 			bitmap->patternbuf, pattern_len);
9328 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
9329 			bitmap->bitmaskbuf, pattern_len);
9330 
9331 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
9332 }
9333 
9334 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
9335 {
9336 	struct wmi_wow_del_pattern_cmd *cmd;
9337 	struct sk_buff *skb;
9338 	size_t len;
9339 
9340 	len = sizeof(*cmd);
9341 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9342 	if (!skb)
9343 		return -ENOMEM;
9344 
9345 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
9346 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
9347 						 sizeof(*cmd));
9348 	cmd->vdev_id = cpu_to_le32(vdev_id);
9349 	cmd->pattern_id = cpu_to_le32(pattern_id);
9350 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9351 
9352 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
9353 		   vdev_id, pattern_id);
9354 
9355 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
9356 }
9357 
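/* Build (but do not send) the NLO config command that starts PNO scans:
 * one nlo_configured_params entry per configured network plus a single
 * channel list taken from the first network.
 */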
9358 static struct sk_buff *
9359 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
9360 				   struct wmi_pno_scan_req_arg *pno)
9361 {
9362 	struct nlo_configured_params *nlo_list;
9363 	size_t len, nlo_list_len, channel_list_len;
9364 	struct wmi_wow_nlo_config_cmd *cmd;
9365 	__le32 *channel_list;
9366 	struct wmi_tlv *tlv;
9367 	struct sk_buff *skb;
9368 	void *ptr;
9369 	u32 i;
9370 
9371 	len = sizeof(*cmd) +
9372 	      /* TLV placeholder for array of structures
9373 	       * nlo_configured_params(nlo_list)
9374 	       */
9375 	      sizeof(*tlv) +
9376 	      /* TLV placeholder for array of uint32 channel_list */
9377 	      sizeof(*tlv);
9378 
9379 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
9380 	len += channel_list_len;
9381 
9382 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
9383 	len += nlo_list_len;
9384 
9385 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9386 	if (!skb)
9387 		return ERR_PTR(-ENOMEM);
9388 
9389 	ptr = skb->data;
9390 	cmd = ptr;
9391 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
9392 
9393 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
9394 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
9395 
9396 	/* current FW does not support min-max range for dwell time */
9397 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
9398 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
9399 
9400 	if (pno->do_passive_scan)
9401 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
9402 
9403 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
9404 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
9405 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
9406 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
9407 
9408 	if (pno->enable_pno_scan_randomization) {
9409 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
9410 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
9411 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
9412 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
9413 	}
9414 
9415 	ptr += sizeof(*cmd);
9416 
9417 	/* nlo_configured_params(nlo_list) */
9418 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
9419 	tlv = ptr;
9420 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
9421 
9422 	ptr += sizeof(*tlv);
9423 	nlo_list = ptr;
9424 	for (i = 0; i < pno->uc_networks_count; i++) {
9425 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
9426 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
9427 						     sizeof(*nlo_list));
9428 
9429 		nlo_list[i].ssid.valid = cpu_to_le32(1);
9430 		nlo_list[i].ssid.ssid.ssid_len =
9431 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
9432 		memcpy(nlo_list[i].ssid.ssid.ssid,
9433 		       pno->a_networks[i].ssid.ssid,
9434 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
9435 
9436 		if (pno->a_networks[i].rssi_threshold &&
9437 		    pno->a_networks[i].rssi_threshold > -300) {
9438 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
9439 			nlo_list[i].rssi_cond.rssi =
9440 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
9441 		}
9442 
9443 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
9444 		nlo_list[i].bcast_nw_type.bcast_nw_type =
9445 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
9446 	}
9447 
9448 	ptr += nlo_list_len;
9449 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
9450 	tlv = ptr;
9451 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
9452 	ptr += sizeof(*tlv);
9453 	channel_list = ptr;
9454 
9455 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
9456 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
9457 
9458 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
9459 		   vdev_id);
9460 
9461 	return skb;
9462 }
9463 
9464 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
9465 							 u32 vdev_id)
9466 {
9467 	struct wmi_wow_nlo_config_cmd *cmd;
9468 	struct sk_buff *skb;
9469 	size_t len;
9470 
9471 	len = sizeof(*cmd);
9472 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9473 	if (!skb)
9474 		return ERR_PTR(-ENOMEM);
9475 
9476 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
9477 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
9478 
9479 	cmd->vdev_id = cpu_to_le32(vdev_id);
9480 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
9481 
9482 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9483 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
9484 	return skb;
9485 }
9486 
9487 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
9488 			      struct wmi_pno_scan_req_arg  *pno_scan)
9489 {
9490 	struct sk_buff *skb;
9491 
9492 	if (pno_scan->enable)
9493 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
9494 	else
9495 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
9496 
9497 	if (IS_ERR_OR_NULL(skb))
9498 		return -ENOMEM;
9499 
9500 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
9501 }
9502 
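/* Pack one NS offload TLV array. The base call (ext == false) fills up to
 * WMI_MAX_NS_OFFLOADS tuples; with ext == true the remaining addresses
 * are packed as extended tuples.
 */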
9503 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
9504 				       struct wmi_arp_ns_offload_arg *offload,
9505 				       void **ptr,
9506 				       bool enable,
9507 				       bool ext)
9508 {
9509 	struct wmi_ns_offload_params *ns;
9510 	struct wmi_tlv *tlv;
9511 	void *buf_ptr = *ptr;
9512 	u32 ns_cnt, ns_ext_tuples;
9513 	int i, max_offloads;
9514 
9515 	ns_cnt = offload->ipv6_count;
9516 
9517 	tlv  = buf_ptr;
9518 
9519 	if (ext) {
9520 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
9521 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9522 						 ns_ext_tuples * sizeof(*ns));
9523 		i = WMI_MAX_NS_OFFLOADS;
9524 		max_offloads = offload->ipv6_count;
9525 	} else {
9526 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9527 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
9528 		i = 0;
9529 		max_offloads = WMI_MAX_NS_OFFLOADS;
9530 	}
9531 
9532 	buf_ptr += sizeof(*tlv);
9533 
9534 	for (; i < max_offloads; i++) {
9535 		ns = buf_ptr;
9536 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
9537 							sizeof(*ns));
9538 
9539 		if (enable) {
9540 			if (i < ns_cnt)
9541 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
9542 
9543 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
9544 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
9545 
9546 			if (offload->ipv6_type[i])
9547 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
9548 
9549 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
9550 
9551 			if (!is_zero_ether_addr(ns->target_mac.addr))
9552 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
9553 
9554 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9555 				   "wmi index %d ns_solicited %pI6 target %pI6",
9556 				   i, ns->solicitation_ipaddr,
9557 				   ns->target_ipaddr[0]);
9558 		}
9559 
9560 		buf_ptr += sizeof(*ns);
9561 	}
9562 
9563 	*ptr = buf_ptr;
9564 }
9565 
9566 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
9567 					struct wmi_arp_ns_offload_arg *offload,
9568 					void **ptr,
9569 					bool enable)
9570 {
9571 	struct wmi_arp_offload_params *arp;
9572 	struct wmi_tlv *tlv;
9573 	void *buf_ptr = *ptr;
9574 	int i;
9575 
9576 	/* fill arp tuple */
9577 	tlv = buf_ptr;
9578 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9579 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
9580 	buf_ptr += sizeof(*tlv);
9581 
9582 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
9583 		arp = buf_ptr;
9584 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
9585 							 sizeof(*arp));
9586 
9587 		if (enable && i < offload->ipv4_count) {
9588 			/* Copy the target ip addr and flags */
9589 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
9590 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
9591 
9592 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
9593 				   arp->target_ipaddr);
9594 		}
9595 
9596 		buf_ptr += sizeof(*arp);
9597 	}
9598 
9599 	*ptr = buf_ptr;
9600 }
9601 
9602 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
9603 			      struct ath12k_link_vif *arvif,
9604 			      struct wmi_arp_ns_offload_arg *offload,
9605 			      bool enable)
9606 {
9607 	struct wmi_set_arp_ns_offload_cmd *cmd;
9608 	struct wmi_tlv *tlv;
9609 	struct sk_buff *skb;
9610 	void *buf_ptr;
9611 	size_t len;
9612 	u8 ns_cnt, ns_ext_tuples = 0;
9613 
9614 	ns_cnt = offload->ipv6_count;
9615 
9616 	len = sizeof(*cmd) +
9617 	      sizeof(*tlv) +
9618 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
9619 	      sizeof(*tlv) +
9620 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
9621 
9622 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
9623 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
9624 		len += sizeof(*tlv) +
9625 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
9626 	}
9627 
9628 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9629 	if (!skb)
9630 		return -ENOMEM;
9631 
9632 	buf_ptr = skb->data;
9633 	cmd = buf_ptr;
9634 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
9635 						 sizeof(*cmd));
9636 	cmd->flags = cpu_to_le32(0);
9637 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9638 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
9639 
9640 	buf_ptr += sizeof(*cmd);
9641 
9642 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
9643 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
9644 
9645 	if (ns_ext_tuples)
9646 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
9647 
9648 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
9649 }
9650 
9651 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
9652 				 struct ath12k_link_vif *arvif, bool enable)
9653 {
9654 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
9655 	struct wmi_gtk_rekey_offload_cmd *cmd;
9656 	struct sk_buff *skb;
9657 	__le64 replay_ctr;
9658 	int len;
9659 
9660 	len = sizeof(*cmd);
9661 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9662 	if (!skb)
9663 		return -ENOMEM;
9664 
9665 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9666 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9667 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9668 
9669 	if (enable) {
9670 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
9671 
9672 		/* the key lengths in rekey_data and cmd are equal */
9673 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
9674 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
9675 
9676 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
9677 		memcpy(cmd->replay_ctr, &replay_ctr,
9678 		       sizeof(replay_ctr));
9679 	} else {
9680 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
9681 	}
9682 
9683 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
9684 		   arvif->vdev_id, enable);
9685 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9686 }
9687 
9688 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
9689 				 struct ath12k_link_vif *arvif)
9690 {
9691 	struct wmi_gtk_rekey_offload_cmd *cmd;
9692 	struct sk_buff *skb;
9693 	int len;
9694 
9695 	len = sizeof(*cmd);
9696 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9697 	if (!skb)
9698 		return -ENOMEM;
9699 
9700 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9701 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9702 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9703 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
9704 
9705 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
9706 		   arvif->vdev_id);
9707 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9708 }
9709 
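/* Program firmware-offloaded STA keepalive; for the ARP based methods the
 * source/destination IPv4 addresses and the destination MAC are packed
 * into the trailing ARP response TLV.
 */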
9710 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
9711 			     const struct wmi_sta_keepalive_arg *arg)
9712 {
9713 	struct wmi_sta_keepalive_arp_resp_params *arp;
9714 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9715 	struct wmi_sta_keepalive_cmd *cmd;
9716 	struct sk_buff *skb;
9717 	size_t len;
9718 
9719 	len = sizeof(*cmd) + sizeof(*arp);
9720 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9721 	if (!skb)
9722 		return -ENOMEM;
9723 
9724 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
9725 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
9726 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9727 	cmd->enabled = cpu_to_le32(arg->enabled);
9728 	cmd->interval = cpu_to_le32(arg->interval);
9729 	cmd->method = cpu_to_le32(arg->method);
9730 
9731 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
9732 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
9733 						 sizeof(*arp));
9734 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
9735 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
9736 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
9737 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
9738 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
9739 	}
9740 
9741 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9742 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
9743 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
9744 
9745 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
9746 }
9747 
9748 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
9749 {
9750 	struct wmi_mlo_setup_cmd *cmd;
9751 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9752 	u32 *partner_links, num_links;
9753 	int i, ret, buf_len, arg_len;
9754 	struct sk_buff *skb;
9755 	struct wmi_tlv *tlv;
9756 	void *ptr;
9757 
9758 	num_links = mlo_params->num_partner_links;
9759 	arg_len = num_links * sizeof(u32);
9760 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
9761 
9762 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9763 	if (!skb)
9764 		return -ENOMEM;
9765 
9766 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
9767 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
9768 						 sizeof(*cmd));
9769 	cmd->mld_group_id = mlo_params->group_id;
9770 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9771 	ptr = skb->data + sizeof(*cmd);
9772 
9773 	tlv = ptr;
9774 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9775 	ptr += TLV_HDR_SIZE;
9776 
9777 	partner_links = ptr;
9778 	for (i = 0; i < num_links; i++)
9779 		partner_links[i] = mlo_params->partner_link_id[i];
9780 
9781 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
9782 	if (ret) {
9783 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
9784 			    ret);
9785 		dev_kfree_skb(skb);
9786 		return ret;
9787 	}
9788 
9789 	return 0;
9790 }
9791 
9792 int ath12k_wmi_mlo_ready(struct ath12k *ar)
9793 {
9794 	struct wmi_mlo_ready_cmd *cmd;
9795 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9796 	struct sk_buff *skb;
9797 	int ret, len;
9798 
9799 	len = sizeof(*cmd);
9800 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9801 	if (!skb)
9802 		return -ENOMEM;
9803 
9804 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
9805 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
9806 						 sizeof(*cmd));
9807 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9808 
9809 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
9810 	if (ret) {
9811 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
9812 			    ret);
9813 		dev_kfree_skb(skb);
9814 		return ret;
9815 	}
9816 
9817 	return 0;
9818 }
9819 
9820 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
9821 {
9822 	struct wmi_mlo_teardown_cmd *cmd;
9823 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9824 	struct sk_buff *skb;
9825 	int ret, len;
9826 
9827 	len = sizeof(*cmd);
9828 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9829 	if (!skb)
9830 		return -ENOMEM;
9831 
9832 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
9833 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
9834 						 sizeof(*cmd));
9835 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9836 	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
9837 
9838 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
9839 	if (ret) {
9840 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
9841 			    ret);
9842 		dev_kfree_skb(skb);
9843 		return ret;
9844 	}
9845 
9846 	return 0;
9847 }
9848 
9849 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
9850 {
9851 	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
9852 			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
9853 }
9854 
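/* Push the per-channel TPC power limits for a vdev to firmware: one
 * WMI_TAG_VDEV_CH_POWER_INFO entry per power level in param.
 */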
9855 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
9856 				       u32 vdev_id,
9857 				       struct ath12k_reg_tpc_power_info *param)
9858 {
9859 	struct wmi_vdev_set_tpc_power_cmd *cmd;
9860 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9861 	struct wmi_vdev_ch_power_params *ch;
9862 	int i, ret, len, array_len;
9863 	struct sk_buff *skb;
9864 	struct wmi_tlv *tlv;
9865 	u8 *ptr;
9866 
9867 	array_len = sizeof(*ch) * param->num_pwr_levels;
9868 	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
9869 
9870 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9871 	if (!skb)
9872 		return -ENOMEM;
9873 
9874 	ptr = skb->data;
9875 
9876 	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
9877 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
9878 						 sizeof(*cmd));
9879 	cmd->vdev_id = cpu_to_le32(vdev_id);
9880 	cmd->psd_power = cpu_to_le32(param->is_psd_power);
9881 	cmd->eirp_power = cpu_to_le32(param->eirp_power);
9882 	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
9883 
9884 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9885 		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
9886 		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
9887 
9888 	ptr += sizeof(*cmd);
9889 	tlv = (struct wmi_tlv *)ptr;
9890 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
9891 
9892 	ptr += TLV_HDR_SIZE;
9893 	ch = (struct wmi_vdev_ch_power_params *)ptr;
9894 
9895 	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
9896 		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
9897 							sizeof(*ch));
9898 		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
9899 		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
9900 
9901 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
9902 			   le32_to_cpu(ch->chan_cfreq), le32_to_cpu(ch->tx_power));
9903 	}
9904 
9905 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
9906 	if (ret) {
9907 		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
9908 		dev_kfree_skb(skb);
9909 		return ret;
9910 	}
9911 
9912 	return 0;
9913 }
9914