xref: /linux/drivers/net/wireless/ath/ath12k/wmi.c (revision 14ea4cd1b19162888f629c4ce1ba268c683b0f12)
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
};
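
/* Tags absent from the table above, or present with .min_len = 0, are
 * accepted without a minimum-length check; see the policy lookup in
 * ath12k_wmi_tlv_iter() below.
 */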

static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}
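
/* The TLV header is one 32-bit little-endian word: the tag goes in the
 * WMI_TLV_TAG field and the payload length, which excludes the header
 * itself, in the WMI_TLV_LEN field. Typical use:
 *
 *	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
 */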

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_tids = ath12k_core_get_max_num_tids(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host support for peer map v3 and unmap v2 */
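	/* The value 0x32 presumably encodes the map version (3) in the
	 * high nibble and the unmap version (2) in the low nibble.
	 */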
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI
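
/* e.g., PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI
 */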

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
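
/* A buffer holding two TLVs, for example, lays out as
 *	[hdr tag=A len=8][8 payload bytes][hdr tag=B len=4][4 payload bytes]
 * and iter() runs once per TLV with the tag, the payload length and a
 * pointer to the payload.
 */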

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ab, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ab, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

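/* Parse a WMI event skb into a tag-indexed pointer table. The table is
 * heap allocated; callers own it and are expected to kfree() it once
 * they are done with the parsed TLVs.
 */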
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

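/* Send a WMI command, retrying the nowait variant whenever it returns
 * -EAGAIN (typically when HTC is out of tx credits) until credits are
 * replenished, the crash-flush flag turns the error into -ESHUTDOWN, or
 * WMI_SEND_TIMEOUT_HZ elapses.
 */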
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to a host-based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by firmware depends on the actual
	 * hardware chains used. For example, on a 4x4-capable macphy the
	 * first 4 chains can serve the first mac and the remaining 4 the
	 * second mac, or vice versa; chainmask 0xf is then advertised for
	 * the first mac and 0xf0 for the second. Compute the chainmask
	 * shift, used when advertising supported ht/vht rates to mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
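	/* e.g., a mac advertising rx_chain_mask 0xf0 gets
	 * rx_chain_mask_shift = 4, the first set bit of 0xf0.
	 */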

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed due to NULL param\n", __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The WMI services in
 * the WMI service ready event are advertised in b0-b3 (the 4 LSBs) of
 * each 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
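
/* With 4 service bits per 32-bit word, as described above, service id 10,
 * for instance, is read from bit 2 (10 % 4) of word 2 (10 / 4) and
 * mirrored to bit 10 of the linear svc_map.
 */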

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
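
/* The requested length is rounded up to a multiple of 4 above since WMI
 * TLVs are laid out in 32-bit units; the zeroed tail then doubles as
 * padding for the final TLV.
 */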

int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* This can be optimized by sending the tx/rx chain configuration
	 * only for the supported bands instead of always sending it for
	 * both bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	else
		chan->band_center_freq2 = 0;

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(params->vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(params->aid);

	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

	if (params->tx_bssid) {
		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   params->vdev_id, params->aid, params->bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;
	struct wmi_peer_create_mlo_params *ml_param;
	void *ptr;
	struct wmi_tlv *tlv;

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id,  peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer %pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d\n", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

1620 /* TODO: FW support for this command is not available yet.
1621  * It can be tested once the command and the corresponding
1622  * event are implemented in FW.
1623  */
1624 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1625 					  enum wmi_bss_chan_info_req_type type)
1626 {
1627 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1628 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1629 	struct sk_buff *skb;
1630 	int ret;
1631 
1632 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1633 	if (!skb)
1634 		return -ENOMEM;
1635 
1636 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1637 
1638 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1639 						 sizeof(*cmd));
1640 	cmd->req_type = cpu_to_le32(type);
1641 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1642 
1643 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1644 		   "WMI bss chan info req type %d\n", type);
1645 
1646 	ret = ath12k_wmi_cmd_send(wmi, skb,
1647 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1648 	if (ret) {
1649 		ath12k_warn(ar->ab,
1650 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1651 		dev_kfree_skb(skb);
1652 	}
1653 
1654 	return ret;
1655 }
1656 
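/* Configure an AP-side power save parameter for a single peer via
 * WMI_AP_PS_PEER_PARAM_CMDID.
 */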
1657 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1658 					struct ath12k_wmi_ap_ps_arg *arg)
1659 {
1660 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1661 	struct wmi_ap_ps_peer_cmd *cmd;
1662 	struct sk_buff *skb;
1663 	int ret;
1664 
1665 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1666 	if (!skb)
1667 		return -ENOMEM;
1668 
1669 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1670 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1671 						 sizeof(*cmd));
1672 
1673 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1674 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1675 	cmd->param = cpu_to_le32(arg->param);
1676 	cmd->value = cpu_to_le32(arg->value);
1677 
1678 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1679 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1680 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1681 
1682 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1683 	if (ret) {
1684 		ath12k_warn(ar->ab,
1685 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1686 		dev_kfree_skb(skb);
1687 	}
1688 
1689 	return ret;
1690 }
1691 
1692 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1693 				u32 param, u32 param_value)
1694 {
1695 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1696 	struct wmi_sta_powersave_param_cmd *cmd;
1697 	struct sk_buff *skb;
1698 	int ret;
1699 
1700 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1701 	if (!skb)
1702 		return -ENOMEM;
1703 
1704 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1705 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1706 						 sizeof(*cmd));
1707 
1708 	cmd->vdev_id = cpu_to_le32(vdev_id);
1709 	cmd->param = cpu_to_le32(param);
1710 	cmd->value = cpu_to_le32(param_value);
1711 
1712 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1713 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1714 		   vdev_id, param, param_value);
1715 
1716 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1717 	if (ret) {
1718 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1719 		dev_kfree_skb(skb);
1720 	}
1721 
1722 	return ret;
1723 }
1724 
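/* Force a firmware hang of the given type after delay_time_ms; useful for
 * exercising the firmware recovery path.
 */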
1725 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1726 {
1727 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1728 	struct wmi_force_fw_hang_cmd *cmd;
1729 	struct sk_buff *skb;
1730 	int ret, len;
1731 
1732 	len = sizeof(*cmd);
1733 
1734 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1735 	if (!skb)
1736 		return -ENOMEM;
1737 
1738 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1739 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1740 						 len);
1741 
1742 	cmd->type = cpu_to_le32(type);
1743 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1744 
1745 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1746 
1747 	if (ret) {
1748 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1749 		dev_kfree_skb(skb);
1750 	}
1751 	return ret;
1752 }
1753 
1754 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1755 				  u32 param_id, u32 param_value)
1756 {
1757 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1758 	struct wmi_vdev_set_param_cmd *cmd;
1759 	struct sk_buff *skb;
1760 	int ret;
1761 
1762 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1763 	if (!skb)
1764 		return -ENOMEM;
1765 
1766 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1767 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1768 						 sizeof(*cmd));
1769 
1770 	cmd->vdev_id = cpu_to_le32(vdev_id);
1771 	cmd->param_id = cpu_to_le32(param_id);
1772 	cmd->param_value = cpu_to_le32(param_value);
1773 
1774 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1775 		   "WMI vdev id 0x%x set param %d value %d\n",
1776 		   vdev_id, param_id, param_value);
1777 
1778 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1779 	if (ret) {
1780 		ath12k_warn(ar->ab,
1781 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1782 		dev_kfree_skb(skb);
1783 	}
1784 
1785 	return ret;
1786 }
1787 
1788 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1789 {
1790 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1791 	struct wmi_get_pdev_temperature_cmd *cmd;
1792 	struct sk_buff *skb;
1793 	int ret;
1794 
1795 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1796 	if (!skb)
1797 		return -ENOMEM;
1798 
1799 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1800 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1801 						 sizeof(*cmd));
1802 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1803 
1804 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1805 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1806 
1807 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1808 	if (ret) {
1809 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1810 		dev_kfree_skb(skb);
1811 	}
1812 
1813 	return ret;
1814 }
1815 
1816 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1817 					    u32 vdev_id, u32 bcn_ctrl_op)
1818 {
1819 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1820 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1821 	struct sk_buff *skb;
1822 	int ret;
1823 
1824 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1825 	if (!skb)
1826 		return -ENOMEM;
1827 
1828 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1829 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1830 						 sizeof(*cmd));
1831 
1832 	cmd->vdev_id = cpu_to_le32(vdev_id);
1833 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1834 
1835 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1836 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1837 		   vdev_id, bcn_ctrl_op);
1838 
1839 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1840 	if (ret) {
1841 		ath12k_warn(ar->ab,
1842 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1843 		dev_kfree_skb(skb);
1844 	}
1845 
1846 	return ret;
1847 }
1848 
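/* Pass the P2P IE to be carried in GO beacons down to firmware. p2p_ie
 * points at a raw IE (id, len, data), so the total length is p2p_ie[1] + 2.
 */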
1849 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1850 			     const u8 *p2p_ie)
1851 {
1852 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1853 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1854 	size_t p2p_ie_len, aligned_len;
1855 	struct wmi_tlv *tlv;
1856 	struct sk_buff *skb;
1857 	void *ptr;
1858 	int ret, len;
1859 
1860 	p2p_ie_len = p2p_ie[1] + 2;
1861 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1862 
1863 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1864 
1865 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1866 	if (!skb)
1867 		return -ENOMEM;
1868 
1869 	ptr = skb->data;
1870 	cmd = ptr;
1871 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1872 						 sizeof(*cmd));
1873 	cmd->vdev_id = cpu_to_le32(vdev_id);
1874 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1875 
1876 	ptr += sizeof(*cmd);
1877 	tlv = ptr;
1878 	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
1879 					     aligned_len);
1880 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1881 
1882 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1883 	if (ret) {
1884 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1885 		dev_kfree_skb(skb);
1886 	}
1887 
1888 	return ret;
1889 }
1890 
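/* Submit a beacon template for the vdev. Offsets of the TIM, CSA countdown
 * and MBSSID IEs are passed alongside the frame so firmware can update them
 * at runtime; ema_args, when set, describes this template's position within
 * an EMA beacon set.
 */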
1891 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
1892 			struct ieee80211_mutable_offsets *offs,
1893 			struct sk_buff *bcn,
1894 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1895 {
1896 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1897 	struct wmi_bcn_tmpl_cmd *cmd;
1898 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1899 	struct wmi_tlv *tlv;
1900 	struct sk_buff *skb;
1901 	u32 ema_params = 0;
1902 	void *ptr;
1903 	int ret, len;
1904 	size_t aligned_len = roundup(bcn->len, 4);
1905 
1906 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1907 
1908 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1909 	if (!skb)
1910 		return -ENOMEM;
1911 
1912 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1913 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1914 						 sizeof(*cmd));
1915 	cmd->vdev_id = cpu_to_le32(vdev_id);
1916 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1917 	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
1918 	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
1919 	cmd->buf_len = cpu_to_le32(bcn->len);
1920 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
1921 	if (ema_args) {
1922 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
1923 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
1924 		if (ema_args->bcn_index == 0)
1925 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
1926 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
1927 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
1928 		cmd->ema_params = cpu_to_le32(ema_params);
1929 	}
1930 
1931 	ptr = skb->data + sizeof(*cmd);
1932 
1933 	bcn_prb_info = ptr;
1934 	len = sizeof(*bcn_prb_info);
1935 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1936 							  len);
1937 	bcn_prb_info->caps = 0;
1938 	bcn_prb_info->erp = 0;
1939 
1940 	ptr += sizeof(*bcn_prb_info);
1941 
1942 	tlv = ptr;
1943 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
1944 	memcpy(tlv->value, bcn->data, bcn->len);
1945 
1946 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1947 	if (ret) {
1948 		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1949 		dev_kfree_skb(skb);
1950 	}
1951 
1952 	return ret;
1953 }
1954 
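/* Install a peer key in firmware. The key material is carried in a 4-byte
 * aligned WMI_TAG_ARRAY_BYTE TLV following the fixed command structure.
 */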
1955 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
1956 				struct wmi_vdev_install_key_arg *arg)
1957 {
1958 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1959 	struct wmi_vdev_install_key_cmd *cmd;
1960 	struct wmi_tlv *tlv;
1961 	struct sk_buff *skb;
1962 	int ret, len, key_len_aligned;
1963 
1964 	/* The WMI_TAG_ARRAY_BYTE TLV length must be 4-byte aligned; the
1965 	 * actual key length is specified in cmd->key_len.
1966 	 */
1967 	key_len_aligned = roundup(arg->key_len, 4);
1968 
1969 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1970 
1971 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1972 	if (!skb)
1973 		return -ENOMEM;
1974 
1975 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1976 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
1977 						 sizeof(*cmd));
1978 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1979 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1980 	cmd->key_idx = cpu_to_le32(arg->key_idx);
1981 	cmd->key_flags = cpu_to_le32(arg->key_flags);
1982 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
1983 	cmd->key_len = cpu_to_le32(arg->key_len);
1984 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
1985 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
1986 
1987 	if (arg->key_rsc_counter)
1988 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
1989 
1990 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1991 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
1992 	memcpy(tlv->value, arg->key_data, arg->key_len);
1993 
1994 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1995 		   "WMI vdev install key idx %d cipher %d len %d\n",
1996 		   arg->key_idx, arg->key_cipher, arg->key_len);
1997 
1998 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1999 	if (ret) {
2000 		ath12k_warn(ar->ab,
2001 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2002 		dev_kfree_skb(skb);
2003 	}
2004 
2005 	return ret;
2006 }
2007 
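/* Translate the host peer-assoc argument flags into the WMI peer_flags and
 * peer_flags_ext bits of the peer assoc command.
 */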
2008 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2009 				       struct ath12k_wmi_peer_assoc_arg *arg,
2010 				       bool hw_crypto_disabled)
2011 {
2012 	cmd->peer_flags = 0;
2013 	cmd->peer_flags_ext = 0;
2014 
2015 	if (arg->is_wme_set) {
2016 		if (arg->qos_flag)
2017 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2018 		if (arg->apsd_flag)
2019 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2020 		if (arg->ht_flag)
2021 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2022 		if (arg->bw_40)
2023 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2024 		if (arg->bw_80)
2025 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2026 		if (arg->bw_160)
2027 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2028 		if (arg->bw_320)
2029 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2030 
2031 		/* Typically if STBC is enabled for VHT it should be enabled
2032 		 * for HT as well
2033 		 */
2034 		if (arg->stbc_flag)
2035 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2036 
2037 		/* Typically if LDPC is enabled for VHT it should be enabled
2038 		 * for HT as well
2039 		 */
2040 		if (arg->ldpc_flag)
2041 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2042 
2043 		if (arg->static_mimops_flag)
2044 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2045 		if (arg->dynamic_mimops_flag)
2046 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2047 		if (arg->spatial_mux_flag)
2048 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2049 		if (arg->vht_flag)
2050 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2051 		if (arg->he_flag)
2052 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2053 		if (arg->twt_requester)
2054 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2055 		if (arg->twt_responder)
2056 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2057 		if (arg->eht_flag)
2058 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2059 	}
2060 
2061 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2062 	 * (during re-association).
2063 	 * Authorization will be done for these modes on key installation.
2064 	 */
2065 	if (arg->auth_flag)
2066 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2067 	if (arg->need_ptk_4_way) {
2068 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2069 		if (!hw_crypto_disabled)
2070 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2071 	}
2072 	if (arg->need_gtk_2_way)
2073 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2074 	/* safe mode bypass the 4-way handshake */
2075 	if (arg->safe_mode_enabled)
2076 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2077 						 WMI_PEER_NEED_GTK_2_WAY));
2078 
2079 	if (arg->is_pmf_enabled)
2080 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2081 
2082 	/* Disable AMSDU for station transmit, if user configures it.
2083 	 * Disable AMSDU for AP transmit to 11n stations, if user configures
2084 	 * it.
2085 	 * if (arg->amsdu_disable) to be added once FW support is available.
2086 	 */
2087 
2088 	/* Target asserts if the node is marked HT while all MCS rates are
2089 	 * set to 0. Mark the node as non-HT if all the MCS rates are
2090 	 * disabled through iwpriv.
2091 	 */
2092 	if (arg->peer_ht_rates.num_rates == 0)
2093 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2094 }
2095 
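/* Build and send WMI_PEER_ASSOC_CMDID. The command carries, in order: the
 * fixed params, legacy and HT rate byte arrays, the VHT rate set, an array
 * of HE rate sets, optional MLO peer params, an array of EHT rate sets and
 * an optional array of MLO partner link info.
 */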
2096 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2097 				   struct ath12k_wmi_peer_assoc_arg *arg)
2098 {
2099 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2100 	struct wmi_peer_assoc_complete_cmd *cmd;
2101 	struct ath12k_wmi_vht_rate_set_params *mcs;
2102 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2103 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2104 	struct wmi_peer_assoc_mlo_params *ml_params;
2105 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2106 	struct sk_buff *skb;
2107 	struct wmi_tlv *tlv;
2108 	void *ptr;
2109 	u32 peer_legacy_rates_align;
2110 	u32 peer_ht_rates_align;
2111 	int i, ret, len;
2112 	__le32 v;
2113 
2114 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2115 					  sizeof(u32));
2116 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2117 				      sizeof(u32));
2118 
2119 	len = sizeof(*cmd) +
2120 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2121 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2122 	      sizeof(*mcs) + TLV_HDR_SIZE +
2123 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2124 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2125 
2126 	if (arg->ml.enabled)
2127 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2128 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2129 	else
2130 		len += (2 * TLV_HDR_SIZE);
2131 
2132 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2133 	if (!skb)
2134 		return -ENOMEM;
2135 
2136 	ptr = skb->data;
2137 
2138 	cmd = ptr;
2139 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2140 						 sizeof(*cmd));
2141 
2142 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2143 
2144 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2145 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2146 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2147 
2148 	ath12k_wmi_copy_peer_flags(cmd, arg,
2149 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2150 					    &ar->ab->dev_flags));
2151 
2152 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2153 
2154 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2155 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2156 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2157 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2158 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2159 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2160 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2161 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2162 
2163 	/* Update 11ax capabilities */
2164 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2165 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2166 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2167 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2168 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2169 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2170 		cmd->peer_he_cap_phy[i] =
2171 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2172 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2173 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2174 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2175 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2176 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2177 
2178 	/* Update 11be capabilities */
2179 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2180 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2181 		       0);
2182 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2183 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2184 		       0);
2185 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2186 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2187 
2188 	/* Update peer legacy rate information */
2189 	ptr += sizeof(*cmd);
2190 
2191 	tlv = ptr;
2192 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2193 
2194 	ptr += TLV_HDR_SIZE;
2195 
2196 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2197 	memcpy(ptr, arg->peer_legacy_rates.rates,
2198 	       arg->peer_legacy_rates.num_rates);
2199 
2200 	/* Update peer HT rate information */
2201 	ptr += peer_legacy_rates_align;
2202 
2203 	tlv = ptr;
2204 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2205 	ptr += TLV_HDR_SIZE;
2206 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2207 	memcpy(ptr, arg->peer_ht_rates.rates,
2208 	       arg->peer_ht_rates.num_rates);
2209 
2210 	/* VHT Rates */
2211 	ptr += peer_ht_rates_align;
2212 
2213 	mcs = ptr;
2214 
2215 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2216 						 sizeof(*mcs));
2217 
2218 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2219 
2220 	/* Update bandwidth-NSS mapping */
2221 	cmd->peer_bw_rxnss_override = 0;
2222 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2223 
2224 	if (arg->vht_capable) {
2225 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2226 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2227 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2228 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2229 	}
2230 
2231 	/* HE Rates */
2232 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2233 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2234 
2235 	ptr += sizeof(*mcs);
2236 
2237 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2238 
2239 	tlv = ptr;
2240 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2241 	ptr += TLV_HDR_SIZE;
2242 
2243 	/* Loop through the HE rate set */
2244 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2245 		he_mcs = ptr;
2246 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2247 							    sizeof(*he_mcs));
2248 
2249 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2250 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2251 		ptr += sizeof(*he_mcs);
2252 	}
2253 
2254 	tlv = ptr;
2255 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2256 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2257 	ptr += TLV_HDR_SIZE;
2258 	if (!len)
2259 		goto skip_ml_params;
2260 
2261 	ml_params = ptr;
2262 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2263 						       len);
2264 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2265 
2266 	if (arg->ml.assoc_link)
2267 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2268 
2269 	if (arg->ml.primary_umac)
2270 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2271 
2272 	if (arg->ml.logical_link_idx_valid)
2273 		ml_params->flags |=
2274 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2275 
2276 	if (arg->ml.peer_id_valid)
2277 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2278 
2279 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2280 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2281 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2282 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2283 	ptr += sizeof(*ml_params);
2284 
2285 skip_ml_params:
2286 	/* Loop through the EHT rate set */
2287 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2288 	tlv = ptr;
2289 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2290 	ptr += TLV_HDR_SIZE;
2291 
2292 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2293 		eht_mcs = ptr;
2294 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2295 							     sizeof(*eht_mcs));
2296 
2297 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2298 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2299 		ptr += sizeof(*eht_mcs);
2300 	}
2301 
2302 	tlv = ptr;
2303 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2304 	/* fill ML Partner links */
2305 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2306 	ptr += TLV_HDR_SIZE;
2307 
2308 	if (len == 0)
2309 		goto send;
2310 
2311 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2312 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2313 
2314 		partner_info = ptr;
2315 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2316 								  sizeof(*partner_info));
2317 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2318 		partner_info->hw_link_id =
2319 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2320 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2321 
2322 		if (arg->ml.partner_info[i].assoc_link)
2323 			partner_info->flags |=
2324 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2325 
2326 		if (arg->ml.partner_info[i].primary_umac)
2327 			partner_info->flags |=
2328 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2329 
2330 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2331 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2332 			partner_info->flags |= v;
2333 		}
2334 
2335 		partner_info->logical_link_idx =
2336 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2337 		ptr += sizeof(*partner_info);
2338 	}
2339 
2340 send:
2341 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2342 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2343 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2344 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2345 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2346 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2347 		   cmd->peer_mpdu_density,
2348 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2349 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2350 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2351 		   cmd->peer_he_cap_phy[2],
2352 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2353 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2354 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2355 		   cmd->peer_eht_cap_phy[2]);
2356 
2357 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2358 	if (ret) {
2359 		ath12k_warn(ar->ab,
2360 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2361 		dev_kfree_skb(skb);
2362 	}
2363 
2364 	return ret;
2365 }
2366 
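/* Fill a scan request argument with commonly used defaults: dwell and rest
 * times, the scan event mask and a broadcast bssid.
 */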
2367 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2368 				struct ath12k_wmi_scan_req_arg *arg)
2369 {
2370 	/* setup commonly used values */
2371 	arg->scan_req_id = 1;
2372 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2373 	arg->dwell_time_active = 50;
2374 	arg->dwell_time_active_2g = 0;
2375 	arg->dwell_time_passive = 150;
2376 	arg->dwell_time_active_6g = 40;
2377 	arg->dwell_time_passive_6g = 30;
2378 	arg->min_rest_time = 50;
2379 	arg->max_rest_time = 500;
2380 	arg->repeat_probe_time = 0;
2381 	arg->probe_spacing_time = 0;
2382 	arg->idle_time = 0;
2383 	arg->max_scan_time = 20000;
2384 	arg->probe_delay = 5;
2385 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2386 				  WMI_SCAN_EVENT_COMPLETED |
2387 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2388 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2389 				  WMI_SCAN_EVENT_DEQUEUED;
2390 	arg->scan_f_chan_stat_evnt = 1;
2391 	arg->num_bssid = 1;
2392 
2393 	/* fill bssid_list[0] with 0xff, otherwise the bssid and RA will be
2394 	 * zeros in the probe request
2395 	 */
2396 	eth_broadcast_addr(arg->bssid_list[0].addr);
2397 }
2398 
2399 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2400 						   struct ath12k_wmi_scan_req_arg *arg)
2401 {
2402 	/* Scan events subscription */
2403 	if (arg->scan_ev_started)
2404 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2405 	if (arg->scan_ev_completed)
2406 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2407 	if (arg->scan_ev_bss_chan)
2408 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2409 	if (arg->scan_ev_foreign_chan)
2410 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2411 	if (arg->scan_ev_dequeued)
2412 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2413 	if (arg->scan_ev_preempted)
2414 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2415 	if (arg->scan_ev_start_failed)
2416 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2417 	if (arg->scan_ev_restarted)
2418 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2419 	if (arg->scan_ev_foreign_chn_exit)
2420 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2421 	if (arg->scan_ev_suspended)
2422 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2423 	if (arg->scan_ev_resumed)
2424 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2425 
2426 	/* Set scan control flags */
2427 	cmd->scan_ctrl_flags = 0;
2428 	if (arg->scan_f_passive)
2429 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2430 	if (arg->scan_f_strict_passive_pch)
2431 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2432 	if (arg->scan_f_promisc_mode)
2433 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2434 	if (arg->scan_f_capture_phy_err)
2435 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2436 	if (arg->scan_f_half_rate)
2437 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2438 	if (arg->scan_f_quarter_rate)
2439 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2440 	if (arg->scan_f_cck_rates)
2441 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2442 	if (arg->scan_f_ofdm_rates)
2443 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2444 	if (arg->scan_f_chan_stat_evnt)
2445 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2446 	if (arg->scan_f_filter_prb_req)
2447 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2448 	if (arg->scan_f_bcast_probe)
2449 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2450 	if (arg->scan_f_offchan_mgmt_tx)
2451 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2452 	if (arg->scan_f_offchan_data_tx)
2453 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2454 	if (arg->scan_f_force_active_dfs_chn)
2455 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2456 	if (arg->scan_f_add_tpc_ie_in_probe)
2457 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2458 	if (arg->scan_f_add_ds_ie_in_probe)
2459 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2460 	if (arg->scan_f_add_spoofed_mac_in_probe)
2461 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2462 	if (arg->scan_f_add_rand_seq_in_probe)
2463 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2464 	if (arg->scan_f_en_ie_whitelist_in_probe)
2465 		cmd->scan_ctrl_flags |=
2466 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2467 
2468 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2469 						 WMI_SCAN_DWELL_MODE_MASK);
2470 }
2471 
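/* Build and send WMI_START_SCAN_CMDID. Variable-length TLVs for the channel
 * list, SSIDs, BSSIDs, extra IEs and scan hints follow the fixed command;
 * an extra IE buffer that would exceed the maximum WMI message size is
 * dropped with a warning rather than truncated.
 */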
2472 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2473 				   struct ath12k_wmi_scan_req_arg *arg)
2474 {
2475 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2476 	struct wmi_start_scan_cmd *cmd;
2477 	struct ath12k_wmi_ssid_params *ssid = NULL;
2478 	struct ath12k_wmi_mac_addr_params *bssid;
2479 	struct sk_buff *skb;
2480 	struct wmi_tlv *tlv;
2481 	void *ptr;
2482 	int i, ret, len;
2483 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2484 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2485 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2486 
2487 	len = sizeof(*cmd);
2488 
2489 	len += TLV_HDR_SIZE;
2490 	if (arg->num_chan)
2491 		len += arg->num_chan * sizeof(u32);
2492 
2493 	len += TLV_HDR_SIZE;
2494 	if (arg->num_ssids)
2495 		len += arg->num_ssids * sizeof(*ssid);
2496 
2497 	len += TLV_HDR_SIZE;
2498 	if (arg->num_bssid)
2499 		len += sizeof(*bssid) * arg->num_bssid;
2500 
2501 	if (arg->num_hint_bssid)
2502 		len += TLV_HDR_SIZE +
2503 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2504 
2505 	if (arg->num_hint_s_ssid)
2506 		len += TLV_HDR_SIZE +
2507 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2508 
2509 	len += TLV_HDR_SIZE;
2510 	if (arg->extraie.len)
2511 		extraie_len_with_pad =
2512 			roundup(arg->extraie.len, sizeof(u32));
2513 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2514 		len += extraie_len_with_pad;
2515 	} else {
2516 		ath12k_warn(ar->ab, "discarding oversized extraie of %d bytes for scan start\n",
2517 			    arg->extraie.len);
2518 		extraie_len_with_pad = 0;
2519 	}
2520 
2521 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2522 	if (!skb)
2523 		return -ENOMEM;
2524 
2525 	ptr = skb->data;
2526 
2527 	cmd = ptr;
2528 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2529 						 sizeof(*cmd));
2530 
2531 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2532 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2533 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2534 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2535 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2536 
2537 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2538 
2539 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2540 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2541 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2542 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2543 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2544 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2545 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2546 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2547 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2548 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2549 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2550 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2551 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2552 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2553 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2554 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2555 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2556 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2557 
2558 	ptr += sizeof(*cmd);
2559 
2560 	len = arg->num_chan * sizeof(u32);
2561 
2562 	tlv = ptr;
2563 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2564 	ptr += TLV_HDR_SIZE;
2565 	tmp_ptr = (u32 *)ptr;
2566 
2567 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * sizeof(u32));
2568 
2569 	ptr += len;
2570 
2571 	len = arg->num_ssids * sizeof(*ssid);
2572 	tlv = ptr;
2573 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2574 
2575 	ptr += TLV_HDR_SIZE;
2576 
2577 	if (arg->num_ssids) {
2578 		ssid = ptr;
2579 		for (i = 0; i < arg->num_ssids; ++i) {
2580 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2581 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2582 			       arg->ssid[i].ssid_len);
2583 			ssid++;
2584 		}
2585 	}
2586 
2587 	ptr += (arg->num_ssids * sizeof(*ssid));
2588 	len = arg->num_bssid * sizeof(*bssid);
2589 	tlv = ptr;
2590 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2591 
2592 	ptr += TLV_HDR_SIZE;
2593 	bssid = ptr;
2594 
2595 	if (arg->num_bssid) {
2596 		for (i = 0; i < arg->num_bssid; ++i) {
2597 			ether_addr_copy(bssid->addr,
2598 					arg->bssid_list[i].addr);
2599 			bssid++;
2600 		}
2601 	}
2602 
2603 	ptr += arg->num_bssid * sizeof(*bssid);
2604 
2605 	len = extraie_len_with_pad;
2606 	tlv = ptr;
2607 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2608 	ptr += TLV_HDR_SIZE;
2609 
2610 	if (extraie_len_with_pad)
2611 		memcpy(ptr, arg->extraie.ptr,
2612 		       arg->extraie.len);
2613 
2614 	ptr += extraie_len_with_pad;
2615 
2616 	if (arg->num_hint_s_ssid) {
2617 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2618 		tlv = ptr;
2619 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2620 		ptr += TLV_HDR_SIZE;
2621 		s_ssid = ptr;
2622 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2623 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2624 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2625 			s_ssid++;
2626 		}
2627 		ptr += len;
2628 	}
2629 
2630 	if (arg->num_hint_bssid) {
2631 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2632 		tlv = ptr;
2633 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2634 		ptr += TLV_HDR_SIZE;
2635 		hint_bssid = ptr;
2636 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2637 			hint_bssid->freq_flags =
2638 				arg->hint_bssid[i].freq_flags;
2639 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2640 					&arg->hint_bssid[i].bssid.addr[0]);
2641 			hint_bssid++;
2642 		}
2643 	}
2644 
2645 	ret = ath12k_wmi_cmd_send(wmi, skb,
2646 				  WMI_START_SCAN_CMDID);
2647 	if (ret) {
2648 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2649 		dev_kfree_skb(skb);
2650 	}
2651 
2652 	return ret;
2653 }
2654 
2655 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2656 				  struct ath12k_wmi_scan_cancel_arg *arg)
2657 {
2658 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2659 	struct wmi_stop_scan_cmd *cmd;
2660 	struct sk_buff *skb;
2661 	int ret;
2662 
2663 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2664 	if (!skb)
2665 		return -ENOMEM;
2666 
2667 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2668 
2669 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2670 						 sizeof(*cmd));
2671 
2672 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2673 	cmd->requestor = cpu_to_le32(arg->requester);
2674 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2675 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2676 	/* stop the scan with the corresponding scan_id */
2677 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2678 		/* Cancelling all scans */
2679 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2680 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2681 		/* Cancelling VAP scans */
2682 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2683 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2684 		/* Cancelling specific scan */
2685 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2686 	} else {
2687 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2688 			    arg->req_type);
2689 		dev_kfree_skb(skb);
2690 		return -EINVAL;
2691 	}
2692 
2693 	ret = ath12k_wmi_cmd_send(wmi, skb,
2694 				  WMI_STOP_SCAN_CMDID);
2695 	if (ret) {
2696 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2697 		dev_kfree_skb(skb);
2698 	}
2699 
2700 	return ret;
2701 }
2702 
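/* Push the scan channel list to firmware, splitting it into multiple
 * WMI_SCAN_CHAN_LIST_CMDID messages when it exceeds the maximum WMI message
 * size; follow-up chunks are flagged to append to the existing list.
 */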
2703 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2704 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2705 {
2706 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2707 	struct wmi_scan_chan_list_cmd *cmd;
2708 	struct sk_buff *skb;
2709 	struct ath12k_wmi_channel_params *chan_info;
2710 	struct ath12k_wmi_channel_arg *channel_arg;
2711 	struct wmi_tlv *tlv;
2712 	void *ptr;
2713 	int i, ret, len;
2714 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2715 	__le32 *reg1, *reg2;
2716 
2717 	channel_arg = &arg->channel[0];
2718 	while (arg->nallchans) {
2719 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2720 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2721 			sizeof(*chan_info);
2722 
2723 		num_send_chans = min(arg->nallchans, max_chan_limit);
2724 
2725 		arg->nallchans -= num_send_chans;
2726 		len += sizeof(*chan_info) * num_send_chans;
2727 
2728 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2729 		if (!skb)
2730 			return -ENOMEM;
2731 
2732 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2733 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2734 							 sizeof(*cmd));
2735 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2736 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2737 		if (num_sends)
2738 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2739 
2740 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2741 			   "WMI no. of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2742 			   num_send_chans, len, arg->pdev_id, num_sends);
2743 
2744 		ptr = skb->data + sizeof(*cmd);
2745 
2746 		len = sizeof(*chan_info) * num_send_chans;
2747 		tlv = ptr;
2748 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2749 						     len);
2750 		ptr += TLV_HDR_SIZE;
2751 
2752 		for (i = 0; i < num_send_chans; ++i) {
2753 			chan_info = ptr;
2754 			memset(chan_info, 0, sizeof(*chan_info));
2755 			len = sizeof(*chan_info);
2756 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2757 								       len);
2758 
2759 			reg1 = &chan_info->reg_info_1;
2760 			reg2 = &chan_info->reg_info_2;
2761 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2762 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2763 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2764 
2765 			if (channel_arg->is_chan_passive)
2766 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2767 			if (channel_arg->allow_he)
2768 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2769 			else if (channel_arg->allow_vht)
2770 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2771 			else if (channel_arg->allow_ht)
2772 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2773 			if (channel_arg->half_rate)
2774 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2775 			if (channel_arg->quarter_rate)
2776 				chan_info->info |=
2777 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2778 
2779 			if (channel_arg->psc_channel)
2780 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2781 
2782 			if (channel_arg->dfs_set)
2783 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2784 
2785 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2786 							    WMI_CHAN_INFO_MODE);
2787 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2788 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2789 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2790 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2791 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2792 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2793 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2794 						  WMI_CHAN_REG_INFO1_REG_CLS);
2795 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2796 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2797 
2798 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2799 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2800 				   i, chan_info->mhz, chan_info->info);
2801 
2802 			ptr += sizeof(*chan_info);
2803 
2804 			channel_arg++;
2805 		}
2806 
2807 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2808 		if (ret) {
2809 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2810 			dev_kfree_skb(skb);
2811 			return ret;
2812 		}
2813 
2814 		num_sends++;
2815 	}
2816 
2817 	return 0;
2818 }
2819 
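/* Program per-AC (BE/BK/VI/VO) WMM parameters (AIFS, CWmin/CWmax, TXOP,
 * ACM and no-ack) for a vdev via WMI_VDEV_SET_WMM_PARAMS_CMDID.
 */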
2820 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2821 				   struct wmi_wmm_params_all_arg *param)
2822 {
2823 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2824 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2825 	struct wmi_wmm_params *wmm_param;
2826 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2827 	struct sk_buff *skb;
2828 	int ret, ac;
2829 
2830 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2831 	if (!skb)
2832 		return -ENOMEM;
2833 
2834 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2835 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2836 						 sizeof(*cmd));
2837 
2838 	cmd->vdev_id = cpu_to_le32(vdev_id);
2839 	cmd->wmm_param_type = 0;
2840 
2841 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2842 		switch (ac) {
2843 		case WME_AC_BE:
2844 			wmi_wmm_arg = &param->ac_be;
2845 			break;
2846 		case WME_AC_BK:
2847 			wmi_wmm_arg = &param->ac_bk;
2848 			break;
2849 		case WME_AC_VI:
2850 			wmi_wmm_arg = &param->ac_vi;
2851 			break;
2852 		case WME_AC_VO:
2853 			wmi_wmm_arg = &param->ac_vo;
2854 			break;
2855 		}
2856 
2857 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2858 		wmm_param->tlv_header =
2859 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2860 					       sizeof(*wmm_param));
2861 
2862 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2863 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2864 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2865 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2866 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2867 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2868 
2869 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2870 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2871 			   ac, wmm_param->aifs, wmm_param->cwmin,
2872 			   wmm_param->cwmax, wmm_param->txoplimit,
2873 			   wmm_param->acm, wmm_param->no_ack);
2874 	}
2875 	ret = ath12k_wmi_cmd_send(wmi, skb,
2876 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2877 	if (ret) {
2878 		ath12k_warn(ar->ab,
2879 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2880 		dev_kfree_skb(skb);
2881 	}
2882 
2883 	return ret;
2884 }
2885 
2886 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2887 						  u32 pdev_id)
2888 {
2889 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2890 	struct wmi_dfs_phyerr_offload_cmd *cmd;
2891 	struct sk_buff *skb;
2892 	int ret;
2893 
2894 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2895 	if (!skb)
2896 		return -ENOMEM;
2897 
2898 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2899 	cmd->tlv_header =
2900 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2901 				       sizeof(*cmd));
2902 
2903 	cmd->pdev_id = cpu_to_le32(pdev_id);
2904 
2905 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2906 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2907 
2908 	ret = ath12k_wmi_cmd_send(wmi, skb,
2909 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2910 	if (ret) {
2911 		ath12k_warn(ar->ab,
2912 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2913 		dev_kfree_skb(skb);
2914 	}
2915 
2916 	return ret;
2917 }
2918 
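/* Send a BIOS-provided parameter blob, identified by param_id, to firmware
 * on the SoC-level pdev (WMI_PDEV_ID_SOC) as a byte-array TLV.
 */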
2919 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
2920 			    const u8 *buf, size_t buf_len)
2921 {
2922 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2923 	struct wmi_pdev_set_bios_interface_cmd *cmd;
2924 	struct wmi_tlv *tlv;
2925 	struct sk_buff *skb;
2926 	u8 *ptr;
2927 	u32 len, len_aligned;
2928 	int ret;
2929 
2930 	len_aligned = roundup(buf_len, sizeof(u32));
2931 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
2932 
2933 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2934 	if (!skb)
2935 		return -ENOMEM;
2936 
2937 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
2938 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
2939 						 sizeof(*cmd));
2940 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2941 	cmd->param_type_id = cpu_to_le32(param_id);
2942 	cmd->length = cpu_to_le32(buf_len);
2943 
2944 	ptr = skb->data + sizeof(*cmd);
2945 	tlv = (struct wmi_tlv *)ptr;
2946 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
2947 	ptr += TLV_HDR_SIZE;
2948 	memcpy(ptr, buf, buf_len);
2949 
2950 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2951 				  skb,
2952 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
2953 	if (ret) {
2954 		ath12k_warn(ab,
2955 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
2956 			    param_id, ret);
2957 		dev_kfree_skb(skb);
2958 	}
2959 
2960 	return ret;
2961 }
2962 
2963 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
2964 {
2965 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2966 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
2967 	struct wmi_tlv *tlv;
2968 	struct sk_buff *skb;
2969 	int ret;
2970 	u8 *buf_ptr;
2971 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
2972 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
2973 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
2974 
2975 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
2976 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
2977 					      sizeof(u32));
2978 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
2979 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
2980 
2981 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2982 	if (!skb)
2983 		return -ENOMEM;
2984 
2985 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
2986 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
2987 						 sizeof(*cmd));
2988 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2989 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2990 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
2991 
2992 	buf_ptr = skb->data + sizeof(*cmd);
2993 	tlv = (struct wmi_tlv *)buf_ptr;
2994 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
2995 					 sar_table_len_aligned);
2996 	buf_ptr += TLV_HDR_SIZE;
2997 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2998 
2999 	buf_ptr += sar_table_len_aligned;
3000 	tlv = (struct wmi_tlv *)buf_ptr;
3001 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3002 					 sar_dbs_backoff_len_aligned);
3003 	buf_ptr += TLV_HDR_SIZE;
3004 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3005 
3006 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3007 				  skb,
3008 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3009 	if (ret) {
3010 		ath12k_warn(ab,
3011 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3012 			    ret);
3013 		dev_kfree_skb(skb);
3014 	}
3015 
3016 	return ret;
3017 }
3018 
3019 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3020 {
3021 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3022 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3023 	struct wmi_tlv *tlv;
3024 	struct sk_buff *skb;
3025 	int ret;
3026 	u8 *buf_ptr;
3027 	u32 len, sar_geo_len_aligned;
3028 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3029 
3030 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3031 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3032 
3033 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3034 	if (!skb)
3035 		return -ENOMEM;
3036 
3037 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3038 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3039 						 sizeof(*cmd));
3040 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3041 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3042 
3043 	buf_ptr = skb->data + sizeof(*cmd);
3044 	tlv = (struct wmi_tlv *)buf_ptr;
3045 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3046 	buf_ptr += TLV_HDR_SIZE;
3047 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3048 
3049 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3050 				  skb,
3051 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3052 	if (ret) {
3053 		ath12k_warn(ab,
3054 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3055 			    ret);
3056 		dev_kfree_skb(skb);
3057 	}
3058 
3059 	return ret;
3060 }
3061 
3062 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3063 			  u32 tid, u32 initiator, u32 reason)
3064 {
3065 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3066 	struct wmi_delba_send_cmd *cmd;
3067 	struct sk_buff *skb;
3068 	int ret;
3069 
3070 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3071 	if (!skb)
3072 		return -ENOMEM;
3073 
3074 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3075 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3076 						 sizeof(*cmd));
3077 	cmd->vdev_id = cpu_to_le32(vdev_id);
3078 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3079 	cmd->tid = cpu_to_le32(tid);
3080 	cmd->initiator = cpu_to_le32(initiator);
3081 	cmd->reasoncode = cpu_to_le32(reason);
3082 
3083 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3084 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3085 		   vdev_id, mac, tid, initiator, reason);
3086 
3087 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3088 
3089 	if (ret) {
3090 		ath12k_warn(ar->ab,
3091 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3092 		dev_kfree_skb(skb);
3093 	}
3094 
3095 	return ret;
3096 }
3097 
3098 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3099 			      u32 tid, u32 status)
3100 {
3101 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3102 	struct wmi_addba_setresponse_cmd *cmd;
3103 	struct sk_buff *skb;
3104 	int ret;
3105 
3106 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3107 	if (!skb)
3108 		return -ENOMEM;
3109 
3110 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3111 	cmd->tlv_header =
3112 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3113 				       sizeof(*cmd));
3114 	cmd->vdev_id = cpu_to_le32(vdev_id);
3115 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3116 	cmd->tid = cpu_to_le32(tid);
3117 	cmd->statuscode = cpu_to_le32(status);
3118 
3119 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3120 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3121 		   vdev_id, mac, tid, status);
3122 
3123 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3124 
3125 	if (ret) {
3126 		ath12k_warn(ar->ab,
3127 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3128 		dev_kfree_skb(skb);
3129 	}
3130 
3131 	return ret;
3132 }
3133 
3134 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3135 			  u32 tid, u32 buf_size)
3136 {
3137 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3138 	struct wmi_addba_send_cmd *cmd;
3139 	struct sk_buff *skb;
3140 	int ret;
3141 
3142 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3143 	if (!skb)
3144 		return -ENOMEM;
3145 
3146 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3147 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3148 						 sizeof(*cmd));
3149 	cmd->vdev_id = cpu_to_le32(vdev_id);
3150 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3151 	cmd->tid = cpu_to_le32(tid);
3152 	cmd->buffersize = cpu_to_le32(buf_size);
3153 
3154 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3155 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3156 		   vdev_id, mac, tid, buf_size);
3157 
3158 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3159 
3160 	if (ret) {
3161 		ath12k_warn(ar->ab,
3162 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3163 		dev_kfree_skb(skb);
3164 	}
3165 
3166 	return ret;
3167 }
3168 
3169 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3170 {
3171 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3172 	struct wmi_addba_clear_resp_cmd *cmd;
3173 	struct sk_buff *skb;
3174 	int ret;
3175 
3176 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3177 	if (!skb)
3178 		return -ENOMEM;
3179 
3180 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3181 	cmd->tlv_header =
3182 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3183 				       sizeof(*cmd));
3184 	cmd->vdev_id = cpu_to_le32(vdev_id);
3185 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3186 
3187 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3188 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3189 		   vdev_id, mac);
3190 
3191 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3192 
3193 	if (ret) {
3194 		ath12k_warn(ar->ab,
3195 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3196 		dev_kfree_skb(skb);
3197 	}
3198 
3199 	return ret;
3200 }
3201 
3202 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3203 				     struct ath12k_wmi_init_country_arg *arg)
3204 {
3205 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3206 	struct wmi_init_country_cmd *cmd;
3207 	struct sk_buff *skb;
3208 	int ret;
3209 
3210 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3211 	if (!skb)
3212 		return -ENOMEM;
3213 
3214 	cmd = (struct wmi_init_country_cmd *)skb->data;
3215 	cmd->tlv_header =
3216 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3217 				       sizeof(*cmd));
3218 
3219 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3220 
3221 	switch (arg->flags) {
3222 	case ALPHA_IS_SET:
3223 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3224 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3225 		break;
3226 	case CC_IS_SET:
3227 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3228 		cmd->cc_info.country_code =
3229 			cpu_to_le32(arg->cc_info.country_code);
3230 		break;
3231 	case REGDMN_IS_SET:
3232 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3233 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3234 		break;
3235 	default:
3236 		ret = -EINVAL;
3237 		goto out;
3238 	}
3239 
3240 	ret = ath12k_wmi_cmd_send(wmi, skb,
3241 				  WMI_SET_INIT_COUNTRY_CMDID);
3242 
3243 out:
3244 	if (ret) {
3245 		ath12k_warn(ar->ab,
3246 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3247 			    ret);
3248 		dev_kfree_skb(skb);
3249 	}
3250 
3251 	return ret;
3252 }
3253 
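/* Editor's illustrative sketch (not driver code): a caller that pushes a
 * user-supplied two-letter country string to firmware via
 * ath12k_wmi_send_init_country_cmd(). The function name and the point in
 * the regulatory flow where this would run are assumptions.
 */
static int ath12k_example_set_country(struct ath12k *ar, const char *alpha2)
{
	struct ath12k_wmi_init_country_arg arg = {};

	arg.flags = ALPHA_IS_SET;
	memcpy(&arg.cc_info.alpha2, alpha2, 2);
	arg.cc_info.alpha2[2] = 0;

	return ath12k_wmi_send_init_country_cmd(ar, &arg);
}
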
3254 int
3255 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3256 {
3257 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3258 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3259 	struct wmi_twt_enable_params_cmd *cmd;
3260 	struct sk_buff *skb;
3261 	int ret, len;
3262 
3263 	len = sizeof(*cmd);
3264 
3265 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3266 	if (!skb)
3267 		return -ENOMEM;
3268 
3269 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3270 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3271 						 len);
3272 	cmd->pdev_id = cpu_to_le32(pdev_id);
3273 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3274 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3275 	cmd->congestion_thresh_setup =
3276 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3277 	cmd->congestion_thresh_teardown =
3278 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3279 	cmd->congestion_thresh_critical =
3280 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3281 	cmd->interference_thresh_teardown =
3282 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3283 	cmd->interference_thresh_setup =
3284 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3285 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3286 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3287 	cmd->no_of_bcast_mcast_slots =
3288 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3289 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3290 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3291 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3292 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3293 	cmd->remove_sta_slot_interval =
3294 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3295 	/* TODO add MBSSID support */
3296 	cmd->mbss_support = 0;
3297 
3298 	ret = ath12k_wmi_cmd_send(wmi, skb,
3299 				  WMI_TWT_ENABLE_CMDID);
3300 	if (ret) {
3301 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
3302 		dev_kfree_skb(skb);
3303 	}
3304 	return ret;
3305 }
3306 
3307 int
3308 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3309 {
3310 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3311 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3312 	struct wmi_twt_disable_params_cmd *cmd;
3313 	struct sk_buff *skb;
3314 	int ret, len;
3315 
3316 	len = sizeof(*cmd);
3317 
3318 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3319 	if (!skb)
3320 		return -ENOMEM;
3321 
3322 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3323 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3324 						 len);
3325 	cmd->pdev_id = cpu_to_le32(pdev_id);
3326 
3327 	ret = ath12k_wmi_cmd_send(wmi, skb,
3328 				  WMI_TWT_DISABLE_CMDID);
3329 	if (ret) {
3330 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
3331 		dev_kfree_skb(skb);
3332 	}
3333 	return ret;
3334 }
3335 
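/* Editor's illustrative sketch (assumption): TWT is toggled per pdev, so a
 * caller reacting to a configuration change would typically hand
 * ar->pdev->pdev_id to the enable/disable pair above.
 */
static int ath12k_example_set_twt(struct ath12k *ar, bool enable)
{
	if (enable)
		return ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);

	return ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
}
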
3336 int
3337 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3338 			     struct ieee80211_he_obss_pd *he_obss_pd)
3339 {
3340 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3341 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3342 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3343 	struct sk_buff *skb;
3344 	int ret, len;
3345 
3346 	len = sizeof(*cmd);
3347 
3348 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3349 	if (!skb)
3350 		return -ENOMEM;
3351 
3352 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3353 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3354 						 len);
3355 	cmd->vdev_id = cpu_to_le32(vdev_id);
3356 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3357 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3358 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3359 
3360 	ret = ath12k_wmi_cmd_send(wmi, skb,
3361 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3362 	if (ret) {
3363 		ath12k_warn(ab,
3364 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
3365 		dev_kfree_skb(skb);
3366 	}
3367 	return ret;
3368 }
3369 
3370 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3371 				  u8 bss_color, u32 period,
3372 				  bool enable)
3373 {
3374 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3375 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3376 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3377 	struct sk_buff *skb;
3378 	int ret, len;
3379 
3380 	len = sizeof(*cmd);
3381 
3382 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3383 	if (!skb)
3384 		return -ENOMEM;
3385 
3386 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3387 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3388 						 len);
3389 	cmd->vdev_id = cpu_to_le32(vdev_id);
3390 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3391 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3392 	cmd->current_bss_color = cpu_to_le32(bss_color);
3393 	cmd->detection_period_ms = cpu_to_le32(period);
3394 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3395 	cmd->free_slot_expiry_time_ms = 0;
3396 	cmd->flags = 0;
3397 
3398 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3399 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3400 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color, period,
3401 		   ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3402 
3403 	ret = ath12k_wmi_cmd_send(wmi, skb,
3404 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3405 	if (ret) {
3406 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
3407 		dev_kfree_skb(skb);
3408 	}
3409 	return ret;
3410 }
3411 
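/* Editor's illustrative sketch (assumption): turning on OBSS color
 * collision detection for a vdev. The detection period passed here is a
 * placeholder, not a driver default.
 */
static int ath12k_example_enable_color_detect(struct ath12k *ar, u32 vdev_id,
					      u8 bss_color)
{
	return ath12k_wmi_obss_color_cfg_cmd(ar, vdev_id, bss_color,
					     5000 /* placeholder ms */, true);
}
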
3412 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3413 						bool enable)
3414 {
3415 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3416 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3417 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3418 	struct sk_buff *skb;
3419 	int ret, len;
3420 
3421 	len = sizeof(*cmd);
3422 
3423 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3424 	if (!skb)
3425 		return -ENOMEM;
3426 
3427 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3428 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3429 						 len);
3430 	cmd->vdev_id = cpu_to_le32(vdev_id);
3431 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3432 
3433 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3434 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3435 		   vdev_id, enable);
3436 
3437 	ret = ath12k_wmi_cmd_send(wmi, skb,
3438 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3439 	if (ret) {
3440 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
3441 		dev_kfree_skb(skb);
3442 	}
3443 	return ret;
3444 }
3445 
3446 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3447 				   struct sk_buff *tmpl)
3448 {
3449 	struct wmi_tlv *tlv;
3450 	struct sk_buff *skb;
3451 	void *ptr;
3452 	int ret, len;
3453 	size_t aligned_len;
3454 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3455 
3456 	aligned_len = roundup(tmpl->len, 4);
3457 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3458 
3459 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3460 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3461 
3462 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3463 	if (!skb)
3464 		return -ENOMEM;
3465 
3466 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3467 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3468 						 sizeof(*cmd));
3469 	cmd->vdev_id = cpu_to_le32(vdev_id);
3470 	cmd->buf_len = cpu_to_le32(tmpl->len);
3471 	ptr = skb->data + sizeof(*cmd);
3472 
3473 	tlv = ptr;
3474 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3475 	memcpy(tlv->value, tmpl->data, tmpl->len);
3476 
3477 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3478 	if (ret) {
3479 		ath12k_warn(ar->ab,
3480 			    "WMI vdev %i failed to send FILS discovery template command\n",
3481 			    vdev_id);
3482 		dev_kfree_skb(skb);
3483 	}
3484 	return ret;
3485 }
3486 
3487 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3488 			       struct sk_buff *tmpl)
3489 {
3490 	struct wmi_probe_tmpl_cmd *cmd;
3491 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3492 	struct wmi_tlv *tlv;
3493 	struct sk_buff *skb;
3494 	void *ptr;
3495 	int ret, len;
3496 	size_t aligned_len = roundup(tmpl->len, 4);
3497 
3498 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3499 		   "WMI vdev %i set probe response template\n", vdev_id);
3500 
3501 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3502 
3503 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3504 	if (!skb)
3505 		return -ENOMEM;
3506 
3507 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3508 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3509 						 sizeof(*cmd));
3510 	cmd->vdev_id = cpu_to_le32(vdev_id);
3511 	cmd->buf_len = cpu_to_le32(tmpl->len);
3512 
3513 	ptr = skb->data + sizeof(*cmd);
3514 
3515 	probe_info = ptr;
3516 	len = sizeof(*probe_info);
3517 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3518 							len);
3519 	probe_info->caps = 0;
3520 	probe_info->erp = 0;
3521 
3522 	ptr += sizeof(*probe_info);
3523 
3524 	tlv = ptr;
3525 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3526 	memcpy(tlv->value, tmpl->data, tmpl->len);
3527 
3528 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3529 	if (ret) {
3530 		ath12k_warn(ar->ab,
3531 			    "WMI vdev %i failed to send probe response template command\n",
3532 			    vdev_id);
3533 		dev_kfree_skb(skb);
3534 	}
3535 	return ret;
3536 }
3537 
3538 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3539 			      bool unsol_bcast_probe_resp_enabled)
3540 {
3541 	struct sk_buff *skb;
3542 	int ret, len;
3543 	struct wmi_fils_discovery_cmd *cmd;
3544 
3545 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3546 		   "WMI vdev %i set %s interval to %u TU\n",
3547 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3548 		   "unsolicited broadcast probe response" : "FILS discovery",
3549 		   interval);
3550 
3551 	len = sizeof(*cmd);
3552 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3553 	if (!skb)
3554 		return -ENOMEM;
3555 
3556 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3557 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3558 						 len);
3559 	cmd->vdev_id = cpu_to_le32(vdev_id);
3560 	cmd->interval = cpu_to_le32(interval);
3561 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3562 
3563 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3564 	if (ret) {
3565 		ath12k_warn(ar->ab,
3566 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3567 			    vdev_id);
3568 		dev_kfree_skb(skb);
3569 	}
3570 	return ret;
3571 }
3572 
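/* Editor's illustrative sketch (assumption): the command above programs
 * either the FILS discovery interval or the unsolicited broadcast probe
 * response interval; the last argument selects which one.
 */
static int ath12k_example_enable_fils(struct ath12k *ar, u32 vdev_id)
{
	/* 20 TU is a placeholder interval, not a driver default */
	return ath12k_wmi_fils_discovery(ar, vdev_id, 20, false);
}
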
3573 static void
3574 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3575 			      struct ath12k_wmi_pdev_band_arg *arg)
3576 {
3577 	u8 i;
3578 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3579 	struct ath12k_pdev *pdev;
3580 
3581 	for (i = 0; i < soc->num_radios; i++) {
3582 		pdev = &soc->pdevs[i];
3583 		hal_reg_cap = &soc->hal_reg_cap[i];
3584 		arg[i].pdev_id = pdev->pdev_id;
3585 
3586 		switch (pdev->cap.supported_bands) {
3587 		case WMI_HOST_WLAN_2G_5G_CAP:
3588 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3589 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3590 			break;
3591 		case WMI_HOST_WLAN_2G_CAP:
3592 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3593 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3594 			break;
3595 		case WMI_HOST_WLAN_5G_CAP:
3596 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3597 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3598 			break;
3599 		default:
3600 			break;
3601 		}
3602 	}
3603 }
3604 
3605 static void
3606 ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
3607 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3608 {
3609 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3610 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3611 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3612 	wmi_cfg->num_offload_reorder_buffs =
3613 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3614 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3615 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3616 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3617 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3618 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3619 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3620 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3621 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3622 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3623 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3624 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3625 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3626 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3627 	wmi_cfg->roam_offload_max_ap_profiles =
3628 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3629 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3630 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3631 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3632 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3633 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3634 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3635 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3636 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3637 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3638 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3639 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3640 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3641 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3642 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3643 	wmi_cfg->num_tdls_conn_table_entries =
3644 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3645 	wmi_cfg->beacon_tx_offload_max_vdev =
3646 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3647 	wmi_cfg->num_multicast_filter_entries =
3648 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3649 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3650 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3651 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3652 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3653 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3654 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3655 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3656 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3657 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3658 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3659 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3660 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3661 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3662 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3663 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3664 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
3665 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3666 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3667 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3668 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3669 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3670 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3671 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3672 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3673 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3674 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3675 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3676 }
3677 
3678 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3679 				struct ath12k_wmi_init_cmd_arg *arg)
3680 {
3681 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3682 	struct sk_buff *skb;
3683 	struct wmi_init_cmd *cmd;
3684 	struct ath12k_wmi_resource_config_params *cfg;
3685 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3686 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3687 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3688 	struct wmi_tlv *tlv;
3689 	int ret, len;
3690 	void *ptr;
3691 	u32 hw_mode_len = 0;
3692 	u16 idx;
3693 
3694 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3695 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3696 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3697 
3698 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3699 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3700 
3701 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3702 	if (!skb)
3703 		return -ENOMEM;
3704 
3705 	cmd = (struct wmi_init_cmd *)skb->data;
3706 
3707 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3708 						 sizeof(*cmd));
3709 
3710 	ptr = skb->data + sizeof(*cmd);
3711 	cfg = ptr;
3712 
3713 	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
3714 
3715 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3716 						 sizeof(*cfg));
3717 
3718 	ptr += sizeof(*cfg);
3719 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3720 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3721 
3722 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3723 		host_mem_chunks[idx].tlv_header =
3724 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3725 					   len);
3726 
3727 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3728 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3729 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3730 
3731 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3732 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3733 			   arg->mem_chunks[idx].req_id,
3734 			   (u64)arg->mem_chunks[idx].paddr,
3735 			   arg->mem_chunks[idx].len);
3736 	}
3737 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3738 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3739 
3740 	/* host mem chunks TLV header; len is 0 when num_mem_chunks is 0 */
3741 	tlv = ptr;
3742 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3743 	ptr += TLV_HDR_SIZE + len;
3744 
3745 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3746 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3747 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3748 							     sizeof(*hw_mode));
3749 
3750 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3751 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3752 
3753 		ptr += sizeof(*hw_mode);
3754 
3755 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3756 		tlv = ptr;
3757 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3758 
3759 		ptr += TLV_HDR_SIZE;
3760 		len = sizeof(*band_to_mac);
3761 
3762 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3763 			band_to_mac = (void *)ptr;
3764 
3765 			band_to_mac->tlv_header =
3766 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3767 						       len);
3768 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3769 			band_to_mac->start_freq =
3770 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3771 			band_to_mac->end_freq =
3772 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3773 			ptr += sizeof(*band_to_mac);
3774 		}
3775 	}
3776 
3777 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3778 	if (ret) {
3779 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3780 		dev_kfree_skb(skb);
3781 	}
3782 
3783 	return ret;
3784 }
3785 
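/* Editor's note: for reference, the WMI_INIT_CMDID buffer assembled above
 * is laid out as follows (struct names from this file; the header for the
 * host memory chunk array is written even when num_mem_chunks is zero):
 *
 *	wmi_init_cmd				fixed TLV
 *	ath12k_wmi_resource_config_params	fixed TLV
 *	TLV_HDR (WMI_TAG_ARRAY_STRUCT)		host mem chunk array
 *	  ath12k_wmi_host_mem_chunk_params[]	one entry per chunk
 *	ath12k_wmi_pdev_set_hw_mode_cmd		only if hw_mode_id is valid
 *	TLV_HDR (WMI_TAG_ARRAY_STRUCT)		band-to-mac array
 *	  ath12k_wmi_pdev_band_to_mac_params[]	one entry per radio
 */
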
3786 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3787 			    int pdev_id)
3788 {
3789 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3790 	struct sk_buff *skb;
3791 	int ret;
3792 
3793 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3794 	if (!skb)
3795 		return -ENOMEM;
3796 
3797 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3798 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3799 						 sizeof(*cmd));
3800 
3801 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3802 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3803 
3804 	cmd->pdev_id = cpu_to_le32(pdev_id);
3805 
3806 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3807 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3808 
3809 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3810 	if (ret) {
3811 		ath12k_warn(ar->ab,
3812 			    "failed to send lro cfg req wmi cmd\n");
3813 		goto err;
3814 	}
3815 
3816 	return 0;
3817 err:
3818 	dev_kfree_skb(skb);
3819 	return ret;
3820 }
3821 
3822 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3823 {
3824 	unsigned long time_left;
3825 
3826 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3827 						WMI_SERVICE_READY_TIMEOUT_HZ);
3828 	if (!time_left)
3829 		return -ETIMEDOUT;
3830 
3831 	return 0;
3832 }
3833 
3834 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3835 {
3836 	unsigned long time_left;
3837 
3838 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3839 						WMI_SERVICE_READY_TIMEOUT_HZ);
3840 	if (!time_left)
3841 		return -ETIMEDOUT;
3842 
3843 	return 0;
3844 }
3845 
3846 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3847 			   enum wmi_host_hw_mode_config_type mode)
3848 {
3849 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3850 	struct sk_buff *skb;
3851 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3852 	int len;
3853 	int ret;
3854 
3855 	len = sizeof(*cmd);
3856 
3857 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3858 	if (!skb)
3859 		return -ENOMEM;
3860 
3861 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3862 
3863 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3864 						 sizeof(*cmd));
3865 
3866 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3867 	cmd->hw_mode_index = cpu_to_le32(mode);
3868 
3869 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3870 	if (ret) {
3871 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3872 		dev_kfree_skb(skb);
3873 	}
3874 
3875 	return ret;
3876 }
3877 
3878 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3879 {
3880 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3881 	struct ath12k_wmi_init_cmd_arg arg = {};
3882 
3883 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
3884 		     ab->wmi_ab.svc_map))
3885 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
3886 
3887 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
3888 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
3889 
3890 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
3891 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
3892 	arg.mem_chunks = wmi_ab->mem_chunks;
3893 
3894 	if (ab->hw_params->single_pdev_only)
3895 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3896 
3897 	arg.num_band_to_mac = ab->num_radios;
3898 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3899 
3900 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
3901 
3902 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
3903 }
3904 
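/* Editor's illustrative sketch (assumption about the caller): bring-up
 * code typically sends the init command and then blocks until firmware
 * signals unified ready, reusing the wait helper defined above.
 */
static int ath12k_example_fw_init(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_wmi_cmd_init(ab);
	if (ret)
		return ret;

	return ath12k_wmi_wait_for_unified_ready(ab);
}
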
3905 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3906 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3907 {
3908 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3909 	struct sk_buff *skb;
3910 	int ret;
3911 
3912 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3913 	if (!skb)
3914 		return -ENOMEM;
3915 
3916 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3917 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3918 						 sizeof(*cmd));
3919 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3920 	cmd->scan_count = cpu_to_le32(arg->scan_count);
3921 	cmd->scan_period = cpu_to_le32(arg->scan_period);
3922 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3923 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3924 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3925 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3926 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3927 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3928 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3929 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3930 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3931 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3932 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3933 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3934 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3935 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3936 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3937 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
3938 
3939 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3940 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
3941 		   arg->vdev_id);
3942 
3943 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3944 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
3945 	if (ret) {
3946 		ath12k_warn(ar->ab,
3947 			    "failed to send spectral scan config wmi cmd\n");
3948 		goto err;
3949 	}
3950 
3951 	return 0;
3952 err:
3953 	dev_kfree_skb(skb);
3954 	return ret;
3955 }
3956 
3957 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
3958 				    u32 trigger, u32 enable)
3959 {
3960 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
3961 	struct sk_buff *skb;
3962 	int ret;
3963 
3964 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3965 	if (!skb)
3966 		return -ENOMEM;
3967 
3968 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
3969 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
3970 						 sizeof(*cmd));
3971 
3972 	cmd->vdev_id = cpu_to_le32(vdev_id);
3973 	cmd->trigger_cmd = cpu_to_le32(trigger);
3974 	cmd->enable_cmd = cpu_to_le32(enable);
3975 
3976 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3977 		   "WMI spectral enable cmd vdev id 0x%x\n",
3978 		   vdev_id);
3979 
3980 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3981 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
3982 	if (ret) {
3983 		ath12k_warn(ar->ab,
3984 			    "failed to send spectral enable wmi cmd\n");
3985 		goto err;
3986 	}
3987 
3988 	return 0;
3989 err:
3990 	dev_kfree_skb(skb);
3991 	return ret;
3992 }
3993 
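/* Editor's illustrative sketch (assumption): spectral scan is started by
 * first pushing a configuration and then sending the enable command; the
 * trigger/enable values of 1 are placeholders for the real enum values.
 */
static int ath12k_example_start_spectral(struct ath12k *ar,
					 struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	int ret;

	ret = ath12k_wmi_vdev_spectral_conf(ar, arg);
	if (ret)
		return ret;

	return ath12k_wmi_vdev_spectral_enable(ar, arg->vdev_id, 1, 1);
}
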
3994 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
3995 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
3996 {
3997 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
3998 	struct sk_buff *skb;
3999 	int ret;
4000 
4001 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4002 	if (!skb)
4003 		return -ENOMEM;
4004 
4005 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4006 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4007 						 sizeof(*cmd));
4008 
4009 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4010 	cmd->module_id = cpu_to_le32(arg->module_id);
4011 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4012 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4013 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4014 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4015 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4016 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4017 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4018 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4019 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4020 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4021 
4022 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4023 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4024 		   arg->pdev_id);
4025 
4026 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4027 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4028 	if (ret) {
4029 		ath12k_warn(ar->ab,
4030 			    "failed to send dma ring cfg req wmi cmd\n");
4031 		goto err;
4032 	}
4033 
4034 	return 0;
4035 err:
4036 	dev_kfree_skb(skb);
4037 	return ret;
4038 }
4039 
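/* Editor's worked example for the lo/hi address fields above (assumption
 * about the caller): a 64-bit ring base address is split before being
 * handed to this function, e.g.:
 *
 *	arg.base_paddr_lo = lower_32_bits(paddr);
 *	arg.base_paddr_hi = upper_32_bits(paddr);
 *
 * and likewise for the head/tail index physical addresses.
 */
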
4040 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4041 					  u16 tag, u16 len,
4042 					  const void *ptr, void *data)
4043 {
4044 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4045 
4046 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4047 		return -EPROTO;
4048 
4049 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4050 		return -ENOBUFS;
4051 
4052 	arg->num_buf_entry++;
4053 	return 0;
4054 }
4055 
4056 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4057 					 u16 tag, u16 len,
4058 					 const void *ptr, void *data)
4059 {
4060 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4061 
4062 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4063 		return -EPROTO;
4064 
4065 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4066 		return -ENOBUFS;
4067 
4068 	arg->num_meta++;
4069 
4070 	return 0;
4071 }
4072 
4073 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4074 				    u16 tag, u16 len,
4075 				    const void *ptr, void *data)
4076 {
4077 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4078 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4079 	u32 pdev_id;
4080 	int ret;
4081 
4082 	switch (tag) {
4083 	case WMI_TAG_DMA_BUF_RELEASE:
4084 		fixed = ptr;
4085 		arg->fixed = *fixed;
4086 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4087 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4088 		break;
4089 	case WMI_TAG_ARRAY_STRUCT:
4090 		if (!arg->buf_entry_done) {
4091 			arg->num_buf_entry = 0;
4092 			arg->buf_entry = ptr;
4093 
4094 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4095 						  ath12k_wmi_dma_buf_entry_parse,
4096 						  arg);
4097 			if (ret) {
4098 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4099 					    ret);
4100 				return ret;
4101 			}
4102 
4103 			arg->buf_entry_done = true;
4104 		} else if (!arg->meta_data_done) {
4105 			arg->num_meta = 0;
4106 			arg->meta_data = ptr;
4107 
4108 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4109 						  ath12k_wmi_dma_buf_meta_parse,
4110 						  arg);
4111 			if (ret) {
4112 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4113 					    ret);
4114 				return ret;
4115 			}
4116 
4117 			arg->meta_data_done = true;
4118 		}
4119 		break;
4120 	default:
4121 		break;
4122 	}
4123 	return 0;
4124 }
4125 
4126 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4127 						       struct sk_buff *skb)
4128 {
4129 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4130 	struct ath12k_dbring_buf_release_event param;
4131 	int ret;
4132 
4133 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4134 				  ath12k_wmi_dma_buf_parse,
4135 				  &arg);
4136 	if (ret) {
4137 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4138 		return;
4139 	}
4140 
4141 	param.fixed = arg.fixed;
4142 	param.buf_entry = arg.buf_entry;
4143 	param.num_buf_entry = arg.num_buf_entry;
4144 	param.meta_data = arg.meta_data;
4145 	param.num_meta = arg.num_meta;
4146 
4147 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4148 	if (ret) {
4149 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4150 		return;
4151 	}
4152 }
4153 
4154 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4155 					 u16 tag, u16 len,
4156 					 const void *ptr, void *data)
4157 {
4158 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4159 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4160 	u32 phy_map = 0;
4161 
4162 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4163 		return -EPROTO;
4164 
4165 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4166 		return -ENOBUFS;
4167 
4168 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4169 				   hw_mode_id);
4170 	svc_rdy_ext->n_hw_mode_caps++;
4171 
4172 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4173 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4174 
4175 	return 0;
4176 }
4177 
4178 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4179 				   u16 len, const void *ptr, void *data)
4180 {
4181 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4182 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4183 	enum wmi_host_hw_mode_config_type mode, pref;
4184 	u32 i;
4185 	int ret;
4186 
4187 	svc_rdy_ext->n_hw_mode_caps = 0;
4188 	svc_rdy_ext->hw_mode_caps = ptr;
4189 
4190 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4191 				  ath12k_wmi_hw_mode_caps_parse,
4192 				  svc_rdy_ext);
4193 	if (ret) {
4194 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4195 		return ret;
4196 	}
4197 
4198 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4199 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4200 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4201 
4202 		if (mode >= WMI_HOST_HW_MODE_MAX)
4203 			continue;
4204 
4205 		pref = soc->wmi_ab.preferred_hw_mode;
4206 
4207 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4208 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4209 			soc->wmi_ab.preferred_hw_mode = mode;
4210 		}
4211 	}
4212 
4213 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
4214 		   soc->wmi_ab.preferred_hw_mode);
4215 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4216 		return -EINVAL;
4217 
4218 	return 0;
4219 }
4220 
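/* Editor's worked example of the selection above: ath12k_hw_mode_pri_map
 * ranks modes, with a smaller value meaning higher priority. If firmware
 * advertises both SINGLE and DBS, whichever of the two has the smaller
 * pri_map entry ends up as preferred_hw_mode.
 */
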
4221 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4222 					 u16 tag, u16 len,
4223 					 const void *ptr, void *data)
4224 {
4225 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4226 
4227 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4228 		return -EPROTO;
4229 
4230 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4231 		return -ENOBUFS;
4232 
4233 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4234 	if (!svc_rdy_ext->n_mac_phy_caps) {
4235 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
4236 						    GFP_ATOMIC);
4237 		if (!svc_rdy_ext->mac_phy_caps)
4238 			return -ENOMEM;
4239 	}
4240 
4241 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4242 	svc_rdy_ext->n_mac_phy_caps++;
4243 	return 0;
4244 }
4245 
4246 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4247 					     u16 tag, u16 len,
4248 					     const void *ptr, void *data)
4249 {
4250 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4251 
4252 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4253 		return -EPROTO;
4254 
4255 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4256 		return -ENOBUFS;
4257 
4258 	svc_rdy_ext->n_ext_hal_reg_caps++;
4259 	return 0;
4260 }
4261 
4262 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4263 				       u16 len, const void *ptr, void *data)
4264 {
4265 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4266 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4267 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4268 	int ret;
4269 	u32 i;
4270 
4271 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4272 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4273 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4274 				  ath12k_wmi_ext_hal_reg_caps_parse,
4275 				  svc_rdy_ext);
4276 	if (ret) {
4277 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4278 		return ret;
4279 	}
4280 
4281 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4282 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4283 						      svc_rdy_ext->soc_hal_reg_caps,
4284 						      svc_rdy_ext->ext_hal_reg_caps, i,
4285 						      &reg_cap);
4286 		if (ret) {
4287 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4288 			return ret;
4289 		}
4290 
4291 		if (reg_cap.phy_id >= MAX_RADIOS) {
4292 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4293 			return -EINVAL;
4294 		}
4295 
4296 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4297 	}
4298 	return 0;
4299 }
4300 
4301 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4302 						 u16 len, const void *ptr,
4303 						 void *data)
4304 {
4305 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4306 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4307 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4308 	u32 phy_id_map;
4309 	int pdev_index = 0;
4310 	int ret;
4311 
4312 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4313 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4314 
4315 	soc->num_radios = 0;
4316 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4317 	soc->fw_pdev_count = 0;
4318 
4319 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4320 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4321 							    svc_rdy_ext,
4322 							    hw_mode_id, soc->num_radios,
4323 							    &soc->pdevs[pdev_index]);
4324 		if (ret) {
4325 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4326 				    soc->num_radios);
4327 			return ret;
4328 		}
4329 
4330 		soc->num_radios++;
4331 
4332 		/* For single_pdev_only targets,
4333 		 * save mac_phy capability in the same pdev
4334 		 */
4335 		if (soc->hw_params->single_pdev_only)
4336 			pdev_index = 0;
4337 		else
4338 			pdev_index = soc->num_radios;
4339 
4340 		/* TODO: mac_phy_cap prints */
4341 		phy_id_map >>= 1;
4342 	}
4343 
4344 	if (soc->hw_params->single_pdev_only) {
4345 		soc->num_radios = 1;
4346 		soc->pdevs[0].pdev_id = 0;
4347 	}
4348 
4349 	return 0;
4350 }
4351 
4352 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4353 					  u16 tag, u16 len,
4354 					  const void *ptr, void *data)
4355 {
4356 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4357 
4358 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4359 		return -EPROTO;
4360 
4361 	parse->n_dma_ring_caps++;
4362 	return 0;
4363 }
4364 
4365 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4366 					u32 num_cap)
4367 {
4368 	size_t sz;
4369 	void *ptr;
4370 
4371 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4372 	ptr = kzalloc(sz, GFP_ATOMIC);
4373 	if (!ptr)
4374 		return -ENOMEM;
4375 
4376 	ab->db_caps = ptr;
4377 	ab->num_db_cap = num_cap;
4378 
4379 	return 0;
4380 }
4381 
4382 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4383 {
4384 	kfree(ab->db_caps);
4385 	ab->db_caps = NULL;
4386 	ab->num_db_cap = 0;
4387 }
4388 
4389 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4390 				    u16 len, const void *ptr, void *data)
4391 {
4392 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4393 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4394 	struct ath12k_dbring_cap *dir_buff_caps;
4395 	int ret;
4396 	u32 i;
4397 
4398 	dma_caps_parse->n_dma_ring_caps = 0;
4399 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4400 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4401 				  ath12k_wmi_dma_ring_caps_parse,
4402 				  dma_caps_parse);
4403 	if (ret) {
4404 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4405 		return ret;
4406 	}
4407 
4408 	if (!dma_caps_parse->n_dma_ring_caps)
4409 		return 0;
4410 
4411 	if (ab->num_db_cap) {
4412 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4413 		return 0;
4414 	}
4415 
4416 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4417 	if (ret)
4418 		return ret;
4419 
4420 	dir_buff_caps = ab->db_caps;
4421 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4422 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4423 			ath12k_warn(ab, "Invalid module id %d\n",
4424 				    le32_to_cpu(dma_caps[i].module_id));
4425 			ret = -EINVAL;
4426 			goto free_dir_buff;
4427 		}
4428 
4429 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4430 		dir_buff_caps[i].pdev_id =
4431 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4432 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4433 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4434 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4435 	}
4436 
4437 	return 0;
4438 
4439 free_dir_buff:
4440 	ath12k_wmi_free_dbring_caps(ab);
4441 	return ret;
4442 }
4443 
4444 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4445 					u16 tag, u16 len,
4446 					const void *ptr, void *data)
4447 {
4448 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4449 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4450 	int ret;
4451 
4452 	switch (tag) {
4453 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4454 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4455 						&svc_rdy_ext->arg);
4456 		if (ret) {
4457 			ath12k_warn(ab, "unable to extract ext params\n");
4458 			return ret;
4459 		}
4460 		break;
4461 
4462 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4463 		svc_rdy_ext->hw_caps = ptr;
4464 		svc_rdy_ext->arg.num_hw_modes =
4465 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4466 		break;
4467 
4468 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4469 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4470 							    svc_rdy_ext);
4471 		if (ret)
4472 			return ret;
4473 		break;
4474 
4475 	case WMI_TAG_ARRAY_STRUCT:
4476 		if (!svc_rdy_ext->hw_mode_done) {
4477 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4478 			if (ret)
4479 				return ret;
4480 
4481 			svc_rdy_ext->hw_mode_done = true;
4482 		} else if (!svc_rdy_ext->mac_phy_done) {
4483 			svc_rdy_ext->n_mac_phy_caps = 0;
4484 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4485 						  ath12k_wmi_mac_phy_caps_parse,
4486 						  svc_rdy_ext);
4487 			if (ret) {
4488 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4489 				return ret;
4490 			}
4491 
4492 			svc_rdy_ext->mac_phy_done = true;
4493 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4494 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4495 			if (ret)
4496 				return ret;
4497 
4498 			svc_rdy_ext->ext_hal_reg_done = true;
4499 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4500 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4501 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4502 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4503 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4504 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4505 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4506 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4507 						       &svc_rdy_ext->dma_caps_parse);
4508 			if (ret)
4509 				return ret;
4510 
4511 			svc_rdy_ext->dma_ring_cap_done = true;
4512 		}
4513 		break;
4514 
4515 	default:
4516 		break;
4517 	}
4518 	return 0;
4519 }
4520 
4521 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4522 					  struct sk_buff *skb)
4523 {
4524 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4525 	int ret;
4526 
4527 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4528 				  ath12k_wmi_svc_rdy_ext_parse,
4529 				  &svc_rdy_ext);
4530 	if (ret) {
4531 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4532 		goto err;
4533 	}
4534 
4535 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4536 		complete(&ab->wmi_ab.service_ready);
4537 
4538 	kfree(svc_rdy_ext.mac_phy_caps);
4539 	return 0;
4540 
4541 err:
4542 	ath12k_wmi_free_dbring_caps(ab);
4543 	return ret;
4544 }
4545 
4546 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4547 				      const void *ptr,
4548 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4549 {
4550 	const struct wmi_service_ready_ext2_event *ev = ptr;
4551 
4552 	if (!ev)
4553 		return -EINVAL;
4554 
4555 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4556 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4557 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4558 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4559 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4560 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4561 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4562 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4563 	return 0;
4564 }
4565 
4566 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4567 				      const __le32 cap_mac_info[],
4568 				      const __le32 cap_phy_info[],
4569 				      const __le32 supp_mcs[],
4570 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4571 				      __le32 cap_info_internal)
4572 {
4573 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4574 	u32 support_320mhz;
4575 	u8 i;
4576 
4577 	if (band == NL80211_BAND_6GHZ)
4578 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4579 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4580 
4581 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4582 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4583 
4584 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4585 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4586 
4587 	if (band == NL80211_BAND_6GHZ)
4588 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4589 
4590 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4591 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4592 	if (band != NL80211_BAND_2GHZ) {
4593 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4594 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4595 	}
4596 
4597 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4598 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4599 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4600 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4601 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4602 
4603 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4604 }
4605 
4606 static int
4607 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4608 				      const struct ath12k_wmi_caps_ext_params *caps,
4609 				      struct ath12k_pdev *pdev)
4610 {
4611 	struct ath12k_band_cap *cap_band;
4612 	u32 bands, support_320mhz;
4613 	int i;
4614 
4615 	if (ab->hw_params->single_pdev_only) {
4616 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4617 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4618 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4619 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4620 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4621 			return 0;
4622 		}
4623 
4624 		for (i = 0; i < ab->fw_pdev_count; i++) {
4625 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4626 
4627 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4628 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4629 				bands = fw_pdev->supported_bands;
4630 				break;
4631 			}
4632 		}
4633 
4634 		if (i == ab->fw_pdev_count)
4635 			return -EINVAL;
4636 	} else {
4637 		bands = pdev->cap.supported_bands;
4638 	}
4639 
4640 	if (bands & WMI_HOST_WLAN_2G_CAP) {
4641 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4642 					  caps->eht_cap_mac_info_2ghz,
4643 					  caps->eht_cap_phy_info_2ghz,
4644 					  caps->eht_supp_mcs_ext_2ghz,
4645 					  &caps->eht_ppet_2ghz,
4646 					  caps->eht_cap_info_internal);
4647 	}
4648 
4649 	if (bands & WMI_HOST_WLAN_5G_CAP) {
4650 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4651 					  caps->eht_cap_mac_info_5ghz,
4652 					  caps->eht_cap_phy_info_5ghz,
4653 					  caps->eht_supp_mcs_ext_5ghz,
4654 					  &caps->eht_ppet_5ghz,
4655 					  caps->eht_cap_info_internal);
4656 
4657 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4658 					  caps->eht_cap_mac_info_5ghz,
4659 					  caps->eht_cap_phy_info_5ghz,
4660 					  caps->eht_supp_mcs_ext_5ghz,
4661 					  &caps->eht_ppet_5ghz,
4662 					  caps->eht_cap_info_internal);
4663 	}
4664 
4665 	return 0;
4666 }
4667 
4668 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4669 					   u16 len, const void *ptr,
4670 					   void *data)
4671 {
4672 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4673 	int i = 0, ret;
4674 
4675 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4676 		return -EPROTO;
4677 
4678 	if (ab->hw_params->single_pdev_only) {
4679 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4680 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4681 			return 0;
4682 	} else {
4683 		for (i = 0; i < ab->num_radios; i++) {
4684 			if (ab->pdevs[i].pdev_id ==
4685 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4686 				break;
4687 		}
4688 
4689 		if (i == ab->num_radios)
4690 			return -EINVAL;
4691 	}
4692 
4693 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4694 	if (ret) {
4695 		ath12k_warn(ab,
4696 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4697 			    ab->pdevs[i].pdev_id, ret);
4698 		return ret;
4699 	}
4700 
4701 	return 0;
4702 }
4703 
4704 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4705 					 u16 tag, u16 len,
4706 					 const void *ptr, void *data)
4707 {
4708 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4709 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4710 	int ret;
4711 
4712 	switch (tag) {
4713 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4714 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4715 						 &parse->arg);
4716 		if (ret) {
4717 			ath12k_warn(ab,
4718 				    "failed to extract wmi service ready ext2 parameters: %d\n",
4719 				    ret);
4720 			return ret;
4721 		}
4722 		break;
4723 
4724 	case WMI_TAG_ARRAY_STRUCT:
4725 		if (!parse->dma_ring_cap_done) {
4726 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4727 						       &parse->dma_caps_parse);
4728 			if (ret)
4729 				return ret;
4730 
4731 			parse->dma_ring_cap_done = true;
4732 		} else if (!parse->spectral_bin_scaling_done) {
4733 			/* TODO: This is a placeholder; the WMI tag for
4734 			 * spectral bin scaling arrives before
4735 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT.
4736 			 */
4737 			parse->spectral_bin_scaling_done = true;
4738 		} else if (!parse->mac_phy_caps_ext_done) {
4739 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4740 						  ath12k_wmi_tlv_mac_phy_caps_ext,
4741 						  parse);
4742 			if (ret) {
4743 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4744 					    ret);
4745 				return ret;
4746 			}
4747 
4748 			parse->mac_phy_caps_ext_done = true;
4749 		}
4750 		break;
4751 	default:
4752 		break;
4753 	}
4754 
4755 	return 0;
4756 }
4757 
4758 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4759 					   struct sk_buff *skb)
4760 {
4761 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4762 	int ret;
4763 
4764 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4765 				  ath12k_wmi_svc_rdy_ext2_parse,
4766 				  &svc_rdy_ext2);
4767 	if (ret) {
4768 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4769 		goto err;
4770 	}
4771 
4772 	complete(&ab->wmi_ab.service_ready);
4773 
4774 	return 0;
4775 
4776 err:
4777 	ath12k_wmi_free_dbring_caps(ab);
4778 	return ret;
4779 }
4780 
4781 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4782 					   struct wmi_vdev_start_resp_event *vdev_rsp)
4783 {
4784 	const void **tb;
4785 	const struct wmi_vdev_start_resp_event *ev;
4786 	int ret;
4787 
4788 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4789 	if (IS_ERR(tb)) {
4790 		ret = PTR_ERR(tb);
4791 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4792 		return ret;
4793 	}
4794 
4795 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4796 	if (!ev) {
4797 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
4798 		kfree(tb);
4799 		return -EPROTO;
4800 	}
4801 
4802 	*vdev_rsp = *ev;
4803 
4804 	kfree(tb);
4805 	return 0;
4806 }
4807 
4808 static struct ath12k_reg_rule
4809 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4810 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4811 {
4812 	struct ath12k_reg_rule *reg_rule_ptr;
4813 	u32 count;
4814 
4815 	reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
4816 			       GFP_ATOMIC);
4817 
4818 	if (!reg_rule_ptr)
4819 		return NULL;
4820 
4821 	for (count = 0; count < num_reg_rules; count++) {
4822 		reg_rule_ptr[count].start_freq =
4823 			le32_get_bits(wmi_reg_rule[count].freq_info,
4824 				      REG_RULE_START_FREQ);
4825 		reg_rule_ptr[count].end_freq =
4826 			le32_get_bits(wmi_reg_rule[count].freq_info,
4827 				      REG_RULE_END_FREQ);
4828 		reg_rule_ptr[count].max_bw =
4829 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4830 				      REG_RULE_MAX_BW);
4831 		reg_rule_ptr[count].reg_power =
4832 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4833 				      REG_RULE_REG_PWR);
4834 		reg_rule_ptr[count].ant_gain =
4835 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4836 				      REG_RULE_ANT_GAIN);
4837 		reg_rule_ptr[count].flags =
4838 			le32_get_bits(wmi_reg_rule[count].flag_info,
4839 				      REG_RULE_FLAGS);
4840 		reg_rule_ptr[count].psd_flag =
4841 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4842 				      REG_RULE_PSD_INFO);
4843 		reg_rule_ptr[count].psd_eirp =
4844 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4845 				      REG_RULE_PSD_EIRP);
4846 	}
4847 
4848 	return reg_rule_ptr;
4849 }
4850 
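/* Editor's worked example of the unpacking above, assuming the usual
 * packing of REG_RULE_START_FREQ in bits [15:0] and REG_RULE_END_FREQ in
 * bits [31:16] of freq_info: a rule covering 5170-5250 MHz arrives as
 * freq_info = cpu_to_le32(5250 << 16 | 5170) = cpu_to_le32(0x14821432),
 * and the two le32_get_bits() calls recover 5170 and 5250 respectively.
 */
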
4851 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4852 						   struct sk_buff *skb,
4853 						   struct ath12k_reg_info *reg_info)
4854 {
4855 	const void **tb;
4856 	const struct wmi_reg_chan_list_cc_ext_event *ev;
4857 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4858 	u32 num_2g_reg_rules, num_5g_reg_rules;
4859 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4860 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4861 	u32 total_reg_rules = 0;
4862 	int ret, i, j;
4863 
4864 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4865 
4866 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4867 	if (IS_ERR(tb)) {
4868 		ret = PTR_ERR(tb);
4869 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4870 		return ret;
4871 	}
4872 
4873 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4874 	if (!ev) {
4875 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4876 		kfree(tb);
4877 		return -EPROTO;
4878 	}
4879 
4880 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4881 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4882 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4883 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4884 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4885 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4886 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4887 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4888 
4889 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4890 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4891 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4892 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4893 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4894 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4895 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4896 	}
4897 
4898 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
4899 	total_reg_rules += num_2g_reg_rules;
4900 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
4901 	total_reg_rules += num_5g_reg_rules;
4902 
4903 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4904 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4905 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4906 		kfree(tb);
4907 		return -EINVAL;
4908 	}
4909 
4910 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4911 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4912 
4913 		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4914 			ath12k_warn(ab, "Num 6G reg rules for AP mode (%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4915 				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4916 			kfree(tb);
4917 			return -EINVAL;
4918 		}
4919 
4920 		total_reg_rules += num_6g_reg_rules_ap[i];
4921 	}
4922 
4923 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4924 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4925 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4926 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4927 
4928 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4929 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4930 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4931 
4932 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4933 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4934 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4935 
4936 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
4937 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
4938 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
4939 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client (type: %d)\n",
4940 				    i);
4941 			kfree(tb);
4942 			return -EINVAL;
4943 		}
4944 	}
4945 
4946 	if (!total_reg_rules) {
4947 		ath12k_warn(ab, "No reg rules available\n");
4948 		kfree(tb);
4949 		return -EINVAL;
4950 	}
4951 
4952 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
4953 
4954 	/* FIXME: Currently the FW includes the 6G reg rules in the 5G rule
4955 	 * list as well for country US.
4956 	 * Having the same 6G reg rule in both the 5G and 6G rule lists
4957 	 * makes the intersect check true, so the same rules are shown
4958 	 * multiple times in the iw command output. The hack below avoids
4959 	 * parsing the 6G rules from the 5G reg rule list; it can be
4960 	 * removed once the FW is updated to stop including the 6G reg
4961 	 * rules in the 5G rule list.
4962 	 */
4963 	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
4964 		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
4965 		num_5g_reg_rules = reg_info->num_5g_reg_rules;
4966 	}
4967 
4968 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
4969 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
4970 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
4971 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
4972 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
4973 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
4974 
4975 	switch (le32_to_cpu(ev->status_code)) {
4976 	case WMI_REG_SET_CC_STATUS_PASS:
4977 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
4978 		break;
4979 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
4980 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4981 		break;
4982 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
4983 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4984 		break;
4985 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
4986 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4987 		break;
4988 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
4989 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4990 		break;
4991 	case WMI_REG_SET_CC_STATUS_FAIL:
4992 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4993 		break;
4994 	}
4995 
4996 	reg_info->is_ext_reg_event = true;
4997 
4998 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
4999 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5000 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5001 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5002 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5003 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5004 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5005 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5006 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5007 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5008 
5009 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5010 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5011 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5012 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5013 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5014 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5015 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5016 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5017 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5018 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5019 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5020 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5021 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5022 	}
5023 
5024 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5025 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5026 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5027 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5028 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5029 		   reg_info->phybitmap);
5030 
5031 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5032 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5033 		   num_2g_reg_rules, num_5g_reg_rules);
5034 
5035 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5036 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5037 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5038 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5039 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5040 
5041 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5042 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5043 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5044 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5045 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5046 
5047 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5048 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5049 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5050 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5051 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5052 
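	/* The reg rules follow the fixed event structure and the TLV
	 * header of the rules array as one flat array, laid out in order:
	 * 2G, 5G, 6G AP (LPI/SP/VLP), then per-AP-type 6G client rules.
	 */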
5053 	ext_wmi_reg_rule =
5054 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5055 			+ sizeof(*ev)
5056 			+ sizeof(struct wmi_tlv));
5057 
5058 	if (num_2g_reg_rules) {
5059 		reg_info->reg_rules_2g_ptr =
5060 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5061 						      ext_wmi_reg_rule);
5062 
5063 		if (!reg_info->reg_rules_2g_ptr) {
5064 			kfree(tb);
5065 			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
5066 			return -ENOMEM;
5067 		}
5068 	}
5069 
	/* advance past the 2G rules unconditionally so that the 5G and 6G
	 * rules below are parsed from the correct offset even when the
	 * event carries no 5G rules
	 */
5070 	ext_wmi_reg_rule += num_2g_reg_rules;

5071 	if (num_5g_reg_rules) {
5072 		reg_info->reg_rules_5g_ptr =
5073 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5074 						      ext_wmi_reg_rule);
5075 
5076 		if (!reg_info->reg_rules_5g_ptr) {
5077 			kfree(tb);
5078 			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
5079 			return -ENOMEM;
5080 		}
5081 	}
5082 
5083 	ext_wmi_reg_rule += num_5g_reg_rules;
5084 
5085 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5086 		reg_info->reg_rules_6g_ap_ptr[i] =
5087 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5088 						      ext_wmi_reg_rule);
5089 
5090 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5091 			kfree(tb);
5092 			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
5093 			return -ENOMEM;
5094 		}
5095 
5096 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5097 	}
5098 
5099 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5100 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5101 			reg_info->reg_rules_6g_client_ptr[j][i] =
5102 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5103 							      ext_wmi_reg_rule);
5104 
5105 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5106 				kfree(tb);
5107 				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
5108 				return -ENOMEM;
5109 			}
5110 
5111 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5112 		}
5113 	}
5114 
5115 	reg_info->client_type = le32_to_cpu(ev->client_type);
5116 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5117 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5118 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5119 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5120 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5121 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5122 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5123 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5124 
5125 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5126 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5127 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5128 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5129 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5130 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5131 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5132 	}
5133 
5134 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5135 
5136 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5137 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5138 
5139 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5140 
5141 	kfree(tb);
5142 	return 0;
5143 }
5144 
5145 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5146 					struct wmi_peer_delete_resp_event *peer_del_resp)
5147 {
5148 	const void **tb;
5149 	const struct wmi_peer_delete_resp_event *ev;
5150 	int ret;
5151 
5152 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5153 	if (IS_ERR(tb)) {
5154 		ret = PTR_ERR(tb);
5155 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5156 		return ret;
5157 	}
5158 
5159 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5160 	if (!ev) {
5161 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5162 		kfree(tb);
5163 		return -EPROTO;
5164 	}
5165 
5166 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5167 
5168 	peer_del_resp->vdev_id = ev->vdev_id;
5169 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5170 			ev->peer_macaddr.addr);
5171 
5172 	kfree(tb);
5173 	return 0;
5174 }
5175 
5176 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5177 					struct sk_buff *skb,
5178 					u32 *vdev_id)
5179 {
5180 	const void **tb;
5181 	const struct wmi_vdev_delete_resp_event *ev;
5182 	int ret;
5183 
5184 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5185 	if (IS_ERR(tb)) {
5186 		ret = PTR_ERR(tb);
5187 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5188 		return ret;
5189 	}
5190 
5191 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5192 	if (!ev) {
5193 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5194 		kfree(tb);
5195 		return -EPROTO;
5196 	}
5197 
5198 	*vdev_id = le32_to_cpu(ev->vdev_id);
5199 
5200 	kfree(tb);
5201 	return 0;
5202 }
5203 
5204 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5205 					struct sk_buff *skb,
5206 					u32 *vdev_id, u32 *tx_status)
5207 {
5208 	const void **tb;
5209 	const struct wmi_bcn_tx_status_event *ev;
5210 	int ret;
5211 
5212 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5213 	if (IS_ERR(tb)) {
5214 		ret = PTR_ERR(tb);
5215 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5216 		return ret;
5217 	}
5218 
5219 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5220 	if (!ev) {
5221 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
5222 		kfree(tb);
5223 		return -EPROTO;
5224 	}
5225 
5226 	*vdev_id = le32_to_cpu(ev->vdev_id);
5227 	*tx_status = le32_to_cpu(ev->tx_status);
5228 
5229 	kfree(tb);
5230 	return 0;
5231 }
5232 
5233 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5234 					      u32 *vdev_id)
5235 {
5236 	const void **tb;
5237 	const struct wmi_vdev_stopped_event *ev;
5238 	int ret;
5239 
5240 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5241 	if (IS_ERR(tb)) {
5242 		ret = PTR_ERR(tb);
5243 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5244 		return ret;
5245 	}
5246 
5247 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5248 	if (!ev) {
5249 		ath12k_warn(ab, "failed to fetch vdev stop ev");
5250 		kfree(tb);
5251 		return -EPROTO;
5252 	}
5253 
5254 	*vdev_id = le32_to_cpu(ev->vdev_id);
5255 
5256 	kfree(tb);
5257 	return 0;
5258 }
5259 
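/* TLV iterator callback for mgmt rx: record the fixed rx header and
 * the first frame-buffer byte array (any further byte arrays are
 * ignored).
 */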
5260 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
5261 					u16 tag, u16 len,
5262 					const void *ptr, void *data)
5263 {
5264 	struct wmi_tlv_mgmt_rx_parse *parse = data;
5265 
5266 	switch (tag) {
5267 	case WMI_TAG_MGMT_RX_HDR:
5268 		parse->fixed = ptr;
5269 		break;
5270 	case WMI_TAG_ARRAY_BYTE:
5271 		if (!parse->frame_buf_done) {
5272 			parse->frame_buf = ptr;
5273 			parse->frame_buf_done = true;
5274 		}
5275 		break;
5276 	}
5277 	return 0;
5278 }
5279 
5280 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
5281 					  struct sk_buff *skb,
5282 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
5283 {
5284 	struct wmi_tlv_mgmt_rx_parse parse = { };
5285 	const struct ath12k_wmi_mgmt_rx_params *ev;
5286 	const u8 *frame;
5287 	int i, ret;
5288 
5289 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5290 				  ath12k_wmi_tlv_mgmt_rx_parse,
5291 				  &parse);
5292 	if (ret) {
5293 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
5294 		return ret;
5295 	}
5296 
5297 	ev = parse.fixed;
5298 	frame = parse.frame_buf;
5299 
5300 	if (!ev || !frame) {
5301 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
5302 		return -EPROTO;
5303 	}
5304 
5305 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
5306 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
5307 	hdr->channel = le32_to_cpu(ev->channel);
5308 	hdr->snr = le32_to_cpu(ev->snr);
5309 	hdr->rate = le32_to_cpu(ev->rate);
5310 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
5311 	hdr->buf_len = le32_to_cpu(ev->buf_len);
5312 	hdr->status = le32_to_cpu(ev->status);
5313 	hdr->flags = le32_to_cpu(ev->flags);
5314 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
5315 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
5316 
5317 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
5318 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
5319 
5320 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
5321 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
5322 		return -EPROTO;
5323 	}
5324 
5325 	/* reshape the sk_buff so that it covers exactly the buf_len bytes
	 * of the received frame starting at `frame`
	 */
5326 	skb_trim(skb, 0);
5327 	skb_put(skb, frame - skb->data);
5328 	skb_pull(skb, frame - skb->data);
5329 	skb_put(skb, hdr->buf_len);
5330 
5331 	return 0;
5332 }
5333 
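/* Complete a mgmt frame tx: look up the msdu by descriptor id in the
 * tx mgmt IDR, unmap its DMA mapping, report the tx status to mac80211
 * and wake the empty waitqueue once no mgmt tx remains pending.
 */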
5334 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
5335 				    u32 status)
5336 {
5337 	struct sk_buff *msdu;
5338 	struct ieee80211_tx_info *info;
5339 	struct ath12k_skb_cb *skb_cb;
5340 	int num_mgmt;
5341 
5342 	spin_lock_bh(&ar->txmgmt_idr_lock);
5343 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
5344 
5345 	if (!msdu) {
5346 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5347 			    desc_id);
5348 		spin_unlock_bh(&ar->txmgmt_idr_lock);
5349 		return -ENOENT;
5350 	}
5351 
5352 	idr_remove(&ar->txmgmt_idr, desc_id);
5353 	spin_unlock_bh(&ar->txmgmt_idr_lock);
5354 
5355 	skb_cb = ATH12K_SKB_CB(msdu);
5356 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5357 
5358 	info = IEEE80211_SKB_CB(msdu);
5359 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5360 		info->flags |= IEEE80211_TX_STAT_ACK;
5361 
5362 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
5363 
5364 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5365 
5366 	/* WARN if we received this event without any mgmt tx pending */
5367 	WARN_ON_ONCE(num_mgmt < 0);
5369 
5370 	if (!num_mgmt)
5371 		wake_up(&ar->txmgmt_empty_waitq);
5372 
5373 	return 0;
5374 }
5375 
5376 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5377 					       struct sk_buff *skb,
5378 					       struct wmi_mgmt_tx_compl_event *param)
5379 {
5380 	const void **tb;
5381 	const struct wmi_mgmt_tx_compl_event *ev;
5382 	int ret;
5383 
5384 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5385 	if (IS_ERR(tb)) {
5386 		ret = PTR_ERR(tb);
5387 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5388 		return ret;
5389 	}
5390 
5391 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5392 	if (!ev) {
5393 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5394 		kfree(tb);
5395 		return -EPROTO;
5396 	}
5397 
5398 	param->pdev_id = ev->pdev_id;
5399 	param->desc_id = ev->desc_id;
5400 	param->status = ev->status;
5401 
5402 	kfree(tb);
5403 	return 0;
5404 }
5405 
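/* Scan state-machine handlers: all of them run with ar->data_lock held
 * and warn when an event arrives in a scan state it is not valid for.
 */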
5406 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5407 {
5408 	lockdep_assert_held(&ar->data_lock);
5409 
5410 	switch (ar->scan.state) {
5411 	case ATH12K_SCAN_IDLE:
5412 	case ATH12K_SCAN_RUNNING:
5413 	case ATH12K_SCAN_ABORTING:
5414 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5415 			    ath12k_scan_state_str(ar->scan.state),
5416 			    ar->scan.state);
5417 		break;
5418 	case ATH12K_SCAN_STARTING:
5419 		ar->scan.state = ATH12K_SCAN_RUNNING;
5420 
5421 		if (ar->scan.is_roc)
5422 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5423 
5424 		complete(&ar->scan.started);
5425 		break;
5426 	}
5427 }
5428 
5429 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5430 {
5431 	lockdep_assert_held(&ar->data_lock);
5432 
5433 	switch (ar->scan.state) {
5434 	case ATH12K_SCAN_IDLE:
5435 	case ATH12K_SCAN_RUNNING:
5436 	case ATH12K_SCAN_ABORTING:
5437 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5438 			    ath12k_scan_state_str(ar->scan.state),
5439 			    ar->scan.state);
5440 		break;
5441 	case ATH12K_SCAN_STARTING:
5442 		complete(&ar->scan.started);
5443 		__ath12k_mac_scan_finish(ar);
5444 		break;
5445 	}
5446 }
5447 
5448 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5449 {
5450 	lockdep_assert_held(&ar->data_lock);
5451 
5452 	switch (ar->scan.state) {
5453 	case ATH12K_SCAN_IDLE:
5454 	case ATH12K_SCAN_STARTING:
5455 		/* One suspected reason scan can be completed while starting is
5456 		 * if firmware fails to deliver all scan events to the host,
5457 		 * e.g. when transport pipe is full. This has been observed
5458 		 * with spectral scan phyerr events starving wmi transport
5459 		 * pipe. In such case the "scan completed" event should be (and
5460 		 * is) ignored by the host as it may be just firmware's scan
5461 		 * state machine recovering.
5462 		 */
5463 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5464 			    ath12k_scan_state_str(ar->scan.state),
5465 			    ar->scan.state);
5466 		break;
5467 	case ATH12K_SCAN_RUNNING:
5468 	case ATH12K_SCAN_ABORTING:
5469 		__ath12k_mac_scan_finish(ar);
5470 		break;
5471 	}
5472 }
5473 
5474 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5475 {
5476 	lockdep_assert_held(&ar->data_lock);
5477 
5478 	switch (ar->scan.state) {
5479 	case ATH12K_SCAN_IDLE:
5480 	case ATH12K_SCAN_STARTING:
5481 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5482 			    ath12k_scan_state_str(ar->scan.state),
5483 			    ar->scan.state);
5484 		break;
5485 	case ATH12K_SCAN_RUNNING:
5486 	case ATH12K_SCAN_ABORTING:
5487 		ar->scan_channel = NULL;
5488 		break;
5489 	}
5490 }
5491 
5492 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5493 {
5494 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5495 
5496 	lockdep_assert_held(&ar->data_lock);
5497 
5498 	switch (ar->scan.state) {
5499 	case ATH12K_SCAN_IDLE:
5500 	case ATH12K_SCAN_STARTING:
5501 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5502 			    ath12k_scan_state_str(ar->scan.state),
5503 			    ar->scan.state);
5504 		break;
5505 	case ATH12K_SCAN_RUNNING:
5506 	case ATH12K_SCAN_ABORTING:
5507 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5508 
5509 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5510 			complete(&ar->scan.on_channel);
5511 
5512 		break;
5513 	}
5514 }
5515 
5516 static const char *
5517 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5518 			       enum wmi_scan_completion_reason reason)
5519 {
5520 	switch (type) {
5521 	case WMI_SCAN_EVENT_STARTED:
5522 		return "started";
5523 	case WMI_SCAN_EVENT_COMPLETED:
5524 		switch (reason) {
5525 		case WMI_SCAN_REASON_COMPLETED:
5526 			return "completed";
5527 		case WMI_SCAN_REASON_CANCELLED:
5528 			return "completed [cancelled]";
5529 		case WMI_SCAN_REASON_PREEMPTED:
5530 			return "completed [preempted]";
5531 		case WMI_SCAN_REASON_TIMEDOUT:
5532 			return "completed [timedout]";
5533 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5534 			return "completed [internal err]";
5535 		case WMI_SCAN_REASON_MAX:
5536 			break;
5537 		}
5538 		return "completed [unknown]";
5539 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5540 		return "bss channel";
5541 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5542 		return "foreign channel";
5543 	case WMI_SCAN_EVENT_DEQUEUED:
5544 		return "dequeued";
5545 	case WMI_SCAN_EVENT_PREEMPTED:
5546 		return "preempted";
5547 	case WMI_SCAN_EVENT_START_FAILED:
5548 		return "start failed";
5549 	case WMI_SCAN_EVENT_RESTARTED:
5550 		return "restarted";
5551 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5552 		return "foreign channel exit";
5553 	default:
5554 		return "unknown";
5555 	}
5556 }
5557 
5558 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5559 			       struct wmi_scan_event *scan_evt_param)
5560 {
5561 	const void **tb;
5562 	const struct wmi_scan_event *ev;
5563 	int ret;
5564 
5565 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5566 	if (IS_ERR(tb)) {
5567 		ret = PTR_ERR(tb);
5568 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5569 		return ret;
5570 	}
5571 
5572 	ev = tb[WMI_TAG_SCAN_EVENT];
5573 	if (!ev) {
5574 		ath12k_warn(ab, "failed to fetch scan ev");
5575 		kfree(tb);
5576 		return -EPROTO;
5577 	}
5578 
5579 	scan_evt_param->event_type = ev->event_type;
5580 	scan_evt_param->reason = ev->reason;
5581 	scan_evt_param->channel_freq = ev->channel_freq;
5582 	scan_evt_param->scan_req_id = ev->scan_req_id;
5583 	scan_evt_param->scan_id = ev->scan_id;
5584 	scan_evt_param->vdev_id = ev->vdev_id;
5585 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5586 
5587 	kfree(tb);
5588 	return 0;
5589 }
5590 
5591 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5592 					   struct wmi_peer_sta_kickout_arg *arg)
5593 {
5594 	const void **tb;
5595 	const struct wmi_peer_sta_kickout_event *ev;
5596 	int ret;
5597 
5598 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5599 	if (IS_ERR(tb)) {
5600 		ret = PTR_ERR(tb);
5601 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5602 		return ret;
5603 	}
5604 
5605 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5606 	if (!ev) {
5607 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5608 		kfree(tb);
5609 		return -EPROTO;
5610 	}
5611 
5612 	arg->mac_addr = ev->peer_macaddr.addr;
5613 
5614 	kfree(tb);
5615 	return 0;
5616 }
5617 
5618 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5619 			       struct wmi_roam_event *roam_ev)
5620 {
5621 	const void **tb;
5622 	const struct wmi_roam_event *ev;
5623 	int ret;
5624 
5625 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5626 	if (IS_ERR(tb)) {
5627 		ret = PTR_ERR(tb);
5628 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5629 		return ret;
5630 	}
5631 
5632 	ev = tb[WMI_TAG_ROAM_EVENT];
5633 	if (!ev) {
5634 		ath12k_warn(ab, "failed to fetch roam ev");
5635 		kfree(tb);
5636 		return -EPROTO;
5637 	}
5638 
5639 	roam_ev->vdev_id = ev->vdev_id;
5640 	roam_ev->reason = ev->reason;
5641 	roam_ev->rssi = ev->rssi;
5642 
5643 	kfree(tb);
5644 	return 0;
5645 }
5646 
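/* Map a channel center frequency to its index in the flattened
 * per-band channel list (used to index ar->survey[]). If the frequency
 * is not found, the index one past the last channel is returned, so
 * callers must bounds-check the result.
 */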
5647 static int freq_to_idx(struct ath12k *ar, int freq)
5648 {
5649 	struct ieee80211_supported_band *sband;
5650 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5651 	int band, ch, idx = 0;
5652 
5653 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5654 		if (!ar->mac.sbands[band].channels)
5655 			continue;
5656 
5657 		sband = hw->wiphy->bands[band];
5658 		if (!sband)
5659 			continue;
5660 
5661 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5662 			if (sband->channels[ch].center_freq == freq)
5663 				goto exit;
5664 	}
5665 
5666 exit:
5667 	return idx;
5668 }
5669 
5670 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5671 				    struct wmi_chan_info_event *ch_info_ev)
5672 {
5673 	const void **tb;
5674 	const struct wmi_chan_info_event *ev;
5675 	int ret;
5676 
5677 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5678 	if (IS_ERR(tb)) {
5679 		ret = PTR_ERR(tb);
5680 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5681 		return ret;
5682 	}
5683 
5684 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5685 	if (!ev) {
5686 		ath12k_warn(ab, "failed to fetch chan info ev");
5687 		kfree(tb);
5688 		return -EPROTO;
5689 	}
5690 
5691 	ch_info_ev->err_code = ev->err_code;
5692 	ch_info_ev->freq = ev->freq;
5693 	ch_info_ev->cmd_flags = ev->cmd_flags;
5694 	ch_info_ev->noise_floor = ev->noise_floor;
5695 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5696 	ch_info_ev->cycle_count = ev->cycle_count;
5697 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5698 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5699 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5700 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5701 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5702 	ch_info_ev->vdev_id = ev->vdev_id;
5703 
5704 	kfree(tb);
5705 	return 0;
5706 }
5707 
5708 static int
5709 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5710 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5711 {
5712 	const void **tb;
5713 	const struct wmi_pdev_bss_chan_info_event *ev;
5714 	int ret;
5715 
5716 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5717 	if (IS_ERR(tb)) {
5718 		ret = PTR_ERR(tb);
5719 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5720 		return ret;
5721 	}
5722 
5723 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5724 	if (!ev) {
5725 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5726 		kfree(tb);
5727 		return -EPROTO;
5728 	}
5729 
5730 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5731 	bss_ch_info_ev->freq = ev->freq;
5732 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5733 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5734 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5735 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5736 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5737 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5738 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5739 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5740 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5741 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5742 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5743 
5744 	kfree(tb);
5745 	return 0;
5746 }
5747 
5748 static int
5749 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5750 				      struct wmi_vdev_install_key_complete_arg *arg)
5751 {
5752 	const void **tb;
5753 	const struct wmi_vdev_install_key_compl_event *ev;
5754 	int ret;
5755 
5756 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5757 	if (IS_ERR(tb)) {
5758 		ret = PTR_ERR(tb);
5759 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5760 		return ret;
5761 	}
5762 
5763 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5764 	if (!ev) {
5765 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5766 		kfree(tb);
5767 		return -EPROTO;
5768 	}
5769 
5770 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
5771 	arg->macaddr = ev->peer_macaddr.addr;
5772 	arg->key_idx = le32_to_cpu(ev->key_idx);
5773 	arg->key_flags = le32_to_cpu(ev->key_flags);
5774 	arg->status = le32_to_cpu(ev->status);
5775 
5776 	kfree(tb);
5777 	return 0;
5778 }
5779 
5780 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5781 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5782 {
5783 	const void **tb;
5784 	const struct wmi_peer_assoc_conf_event *ev;
5785 	int ret;
5786 
5787 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5788 	if (IS_ERR(tb)) {
5789 		ret = PTR_ERR(tb);
5790 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5791 		return ret;
5792 	}
5793 
5794 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5795 	if (!ev) {
5796 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5797 		kfree(tb);
5798 		return -EPROTO;
5799 	}
5800 
5801 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5802 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5803 
5804 	kfree(tb);
5805 	return 0;
5806 }
5807 
5808 static int
5809 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5810 			 struct wmi_pdev_temperature_event *ev)
5811 {
5812 	const void **tb;
	const struct wmi_pdev_temperature_event *temp_ev;
5813 	int ret;
5814 
5815 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5816 	if (IS_ERR(tb)) {
5817 		ret = PTR_ERR(tb);
5818 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5819 		return ret;
5820 	}
5821 
5822 	temp_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5823 	if (!temp_ev) {
5824 		ath12k_warn(ab, "failed to fetch pdev temp ev");
5825 		kfree(tb);
5826 		return -EPROTO;
5827 	}
5828 
	/* copy the fixed event out so the caller actually receives it */
	*ev = *temp_ev;

5829 	kfree(tb);
5830 	return 0;
5831 }
5832 
5833 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5834 {
5835 	/* try to send pending beacons first. they take priority */
5836 	wake_up(&ab->wmi_ab.tx_credits_wq);
5837 }
5838 
5839 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
5840 				       struct sk_buff *skb)
5841 {
5842 	dev_kfree_skb(skb);
5843 }
5844 
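/* "00" and "na" are the alpha2 codes used for the world/default
 * regdomain rather than a specific country.
 */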
5845 static bool ath12k_reg_is_world_alpha(char *alpha)
5846 {
5847 	if (alpha[0] == '0' && alpha[1] == '0')
5848 		return true;
5849 
5850 	if (alpha[0] == 'n' && alpha[1] == 'a')
5851 		return true;
5852 
5853 	return false;
5854 }
5855 
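/* Handle a regulatory channel list (CC ext) event: extract the info,
 * build an ieee80211_regdomain from it and either queue a regd update
 * (after mac registration) or store it as the pdev's default regd.
 */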
5856 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
5857 {
5858 	struct ath12k_reg_info *reg_info = NULL;
5859 	struct ieee80211_regdomain *regd = NULL;
5860 	bool intersect = false;
5861 	int ret = 0, pdev_idx, i, j;
5862 	struct ath12k *ar;
5863 
5864 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
5865 	if (!reg_info) {
5866 		ret = -ENOMEM;
5867 		goto fallback;
5868 	}
5869 
5870 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
5871 
5872 	if (ret) {
5873 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
5874 		goto fallback;
5875 	}
5876 
5877 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
5878 		/* If setting the requested country fails, the FW
5879 		 * retains the current regd. Log the failure and
5880 		 * return from here.
5881 		 */
5882 		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
5883 		goto mem_free;
5884 	}
5885 
5886 	pdev_idx = reg_info->phy_id;
5887 
5888 	if (pdev_idx >= ab->num_radios) {
5889 		/* Process the event for phy0 only if single_pdev_only
5890 		 * is true. If pdev_idx is valid but not 0, discard the
5891 		 * event. Otherwise, it goes to fallback.
5892 		 */
5893 		if (ab->hw_params->single_pdev_only &&
5894 		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
5895 			goto mem_free;
5896 		else
5897 			goto fallback;
5898 	}
5899 
5900 	/* Avoid multiple overwrites of the default regd during core
5901 	 * stop-start after mac registration.
5902 	 */
5903 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
5904 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
5905 		    reg_info->alpha2, 2))
5906 		goto mem_free;
5907 
5908 	/* Intersect new rules with default regd if a new country setting was
5909 	 * requested, i.e. a default regd was already set during initialization
5910 	 * and the regd coming from this event has valid country info.
5911 	 */
5912 	if (ab->default_regd[pdev_idx] &&
5913 	    !ath12k_reg_is_world_alpha((char *)
5914 		ab->default_regd[pdev_idx]->alpha2) &&
5915 	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
5916 		intersect = true;
5917 
5918 	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
5919 	if (!regd) {
5920 		ath12k_warn(ab, "failed to build regd from reg_info\n");
5921 		goto fallback;
5922 	}
5923 
5924 	spin_lock(&ab->base_lock);
5925 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
5926 		/* Once mac is registered, ar is valid and all CC events
5927 		 * from fw are currently assumed to be received in response
5928 		 * to user requests.
5929 		 * Free the previously built regd before assigning the
5930 		 * newly generated regd to ar; kfree() itself handles a
5931 		 * NULL pointer.
5932 		 */
5933 		ar = ab->pdevs[pdev_idx].ar;
5934 		kfree(ab->new_regd[pdev_idx]);
5935 		ab->new_regd[pdev_idx] = regd;
5936 		queue_work(ab->workqueue, &ar->regd_update_work);
5937 	} else {
5938 		/* Multiple events for the same *ar are not expected, but
5939 		 * we can still clear any previously stored default_regd if
5940 		 * we receive this event for the same radio by mistake;
5941 		 * kfree() itself handles a NULL pointer.
5942 		 */
5943 		kfree(ab->default_regd[pdev_idx]);
5944 		/* This regd would be applied during mac registration */
5945 		ab->default_regd[pdev_idx] = regd;
5946 	}
5947 	ab->dfs_region = reg_info->dfs_region;
5948 	spin_unlock(&ab->base_lock);
5949 
5950 	goto mem_free;
5951 
5952 fallback:
5953 	/* Fall back to the older regd (by sending the previous country
5954 	 * setting again) if the fw succeeded but we failed to process it
5955 	 * here. The regdomain should be uniform across driver and fw. Since
5956 	 * the FW has processed the command and sent a success status, we
5957 	 * expect this function to succeed as well. If it doesn't, the country
5958 	 * needs to be reverted at the fw and the old SCAN_CHAN_LIST cmd sent.
5959 	 */
5960 	/* TODO: This is rare, but still should also be handled */
5961 	WARN_ON(1);
5962 mem_free:
5963 	if (reg_info) {
5964 		kfree(reg_info->reg_rules_2g_ptr);
5965 		kfree(reg_info->reg_rules_5g_ptr);
5966 		if (reg_info->is_ext_reg_event) {
5967 			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
5968 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
5969 
5970 			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
5971 				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
5972 					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
5973 		}
5974 		kfree(reg_info);
5975 	}
5976 	return ret;
5977 }
5978 
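/* TLV iterator callback for the WMI ready event: records the FW init
 * status and base MAC address, plus per-pdev MAC addresses when there
 * are multiple radios and the FW supplies one address per radio.
 */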
5979 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
5980 				const void *ptr, void *data)
5981 {
5982 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
5983 	struct wmi_ready_event fixed_param;
5984 	struct ath12k_wmi_mac_addr_params *addr_list;
5985 	struct ath12k_pdev *pdev;
5986 	u32 num_mac_addr;
5987 	int i;
5988 
5989 	switch (tag) {
5990 	case WMI_TAG_READY_EVENT:
5991 		memset(&fixed_param, 0, sizeof(fixed_param));
5992 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
5993 		       min_t(u16, sizeof(fixed_param), len));
5994 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
5995 		rdy_parse->num_extra_mac_addr =
5996 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
5997 
5998 		ether_addr_copy(ab->mac_addr,
5999 				fixed_param.ready_event_min.mac_addr.addr);
6000 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6001 		ab->wmi_ready = true;
6002 		break;
6003 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6004 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6005 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6006 
6007 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6008 			break;
6009 
6010 		for (i = 0; i < ab->num_radios; i++) {
6011 			pdev = &ab->pdevs[i];
6012 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6013 		}
6014 		ab->pdevs_macaddr_valid = true;
6015 		break;
6016 	default:
6017 		break;
6018 	}
6019 
6020 	return 0;
6021 }
6022 
6023 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6024 {
6025 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6026 	int ret;
6027 
6028 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6029 				  ath12k_wmi_rdy_parse, &rdy_parse);
6030 	if (ret) {
6031 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6032 		return ret;
6033 	}
6034 
6035 	complete(&ab->wmi_ab.unified_ready);
6036 	return 0;
6037 }
6038 
6039 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6040 {
6041 	struct wmi_peer_delete_resp_event peer_del_resp;
6042 	struct ath12k *ar;
6043 
6044 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6045 		ath12k_warn(ab, "failed to extract peer delete resp");
6046 		return;
6047 	}
6048 
6049 	rcu_read_lock();
6050 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6051 	if (!ar) {
6052 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6053 			    peer_del_resp.vdev_id);
6054 		rcu_read_unlock();
6055 		return;
6056 	}
6057 
6058 	complete(&ar->peer_delete_done);
6059 	rcu_read_unlock();
6060 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6061 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6062 }
6063 
6064 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6065 					  struct sk_buff *skb)
6066 {
6067 	struct ath12k *ar;
6068 	u32 vdev_id = 0;
6069 
6070 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6071 		ath12k_warn(ab, "failed to extract vdev delete resp");
6072 		return;
6073 	}
6074 
6075 	rcu_read_lock();
6076 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6077 	if (!ar) {
6078 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6079 			    vdev_id);
6080 		rcu_read_unlock();
6081 		return;
6082 	}
6083 
6084 	complete(&ar->vdev_delete_done);
6085 
6086 	rcu_read_unlock();
6087 
6088 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6089 		   vdev_id);
6090 }
6091 
6092 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6093 {
6094 	switch (vdev_resp_status) {
6095 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6096 		return "invalid vdev id";
6097 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6098 		return "not supported";
6099 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6100 		return "dfs violation";
6101 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6102 		return "invalid regdomain";
6103 	default:
6104 		return "unknown";
6105 	}
6106 }
6107 
6108 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6109 {
6110 	struct wmi_vdev_start_resp_event vdev_start_resp;
6111 	struct ath12k *ar;
6112 	u32 status;
6113 
6114 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6115 		ath12k_warn(ab, "failed to extract vdev start resp");
6116 		return;
6117 	}
6118 
6119 	rcu_read_lock();
6120 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6121 	if (!ar) {
6122 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6123 			    vdev_start_resp.vdev_id);
6124 		rcu_read_unlock();
6125 		return;
6126 	}
6127 
6128 	ar->last_wmi_vdev_start_status = 0;
6129 
6130 	status = le32_to_cpu(vdev_start_resp.status);
6131 
6132 	if (WARN_ON_ONCE(status)) {
6133 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6134 			    status, ath12k_wmi_vdev_resp_print(status));
6135 		ar->last_wmi_vdev_start_status = status;
6136 	}
6137 
6138 	complete(&ar->vdev_setup_done);
6139 
6140 	rcu_read_unlock();
6141 
6142 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6143 		   vdev_start_resp.vdev_id);
6144 }
6145 
6146 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6147 {
6148 	u32 vdev_id, tx_status;
6149 
6150 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6151 		ath12k_warn(ab, "failed to extract bcn tx status");
6152 		return;
6153 	}
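	/* no further action is currently taken on the reported tx status */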
6154 }
6155 
6156 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6157 {
6158 	struct ath12k *ar;
6159 	u32 vdev_id = 0;
6160 
6161 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6162 		ath12k_warn(ab, "failed to extract vdev stopped event");
6163 		return;
6164 	}
6165 
6166 	rcu_read_lock();
6167 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6168 	if (!ar) {
6169 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6170 			    vdev_id);
6171 		rcu_read_unlock();
6172 		return;
6173 	}
6174 
6175 	complete(&ar->vdev_setup_done);
6176 
6177 	rcu_read_unlock();
6178 
6179 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6180 }
6181 
6182 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6183 {
6184 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6185 	struct ath12k *ar;
6186 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6187 	struct ieee80211_hdr *hdr;
6188 	u16 fc;
6189 	struct ieee80211_supported_band *sband;
6190 
6191 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6192 		ath12k_warn(ab, "failed to extract mgmt rx event");
6193 		dev_kfree_skb(skb);
6194 		return;
6195 	}
6196 
6197 	memset(status, 0, sizeof(*status));
6198 
6199 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
6200 		   rx_ev.status);
6201 
6202 	rcu_read_lock();
6203 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6204 
6205 	if (!ar) {
6206 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6207 			    rx_ev.pdev_id);
6208 		dev_kfree_skb(skb);
6209 		goto exit;
6210 	}
6211 
6212 	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
6213 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6214 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
6215 			     WMI_RX_STATUS_ERR_CRC))) {
6216 		dev_kfree_skb(skb);
6217 		goto exit;
6218 	}
6219 
6220 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6221 		status->flag |= RX_FLAG_MMIC_ERROR;
6222 
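	/* Derive the band: 6 GHz frames are matched by frequency since
	 * channel numbers alias across bands; 2 GHz and 5 GHz frames are
	 * classified by channel number.
	 */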
6223 	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
6224 	    rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
6225 		status->band = NL80211_BAND_6GHZ;
6226 		status->freq = rx_ev.chan_freq;
6227 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6228 		status->band = NL80211_BAND_2GHZ;
6229 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
6230 		status->band = NL80211_BAND_5GHZ;
6231 	} else {
6232 		/* Shouldn't happen unless list of advertised channels to
6233 		 * mac80211 has been changed.
6234 		 */
6235 		WARN_ON_ONCE(1);
6236 		dev_kfree_skb(skb);
6237 		goto exit;
6238 	}
6239 
6240 	if (rx_ev.phy_mode == MODE_11B &&
6241 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6242 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6243 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6244 
6245 	sband = &ar->mac.sbands[status->band];
6246 
6247 	if (status->band != NL80211_BAND_6GHZ)
6248 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6249 							      status->band);
6250 
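	/* FW reports SNR; approximate an absolute signal level by adding
	 * the default noise floor
	 */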
6251 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
6252 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6253 
6254 	hdr = (struct ieee80211_hdr *)skb->data;
6255 	fc = le16_to_cpu(hdr->frame_control);
6256 
6257 	/* Firmware is guaranteed to report all essential management frames via
6258 	 * WMI while it can deliver some extra via HTT. Since there can be
6259 	 * duplicates, split the reporting wrt monitor/sniffing.
6260 	 */
6261 	status->flag |= RX_FLAG_SKIP_MONITOR;
6262 
6263 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
6264 	 * including group privacy action frames.
6265 	 */
6266 	if (ieee80211_has_protected(hdr->frame_control)) {
6267 		status->flag |= RX_FLAG_DECRYPTED;
6268 
6269 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
6270 			status->flag |= RX_FLAG_IV_STRIPPED |
6271 					RX_FLAG_MMIC_STRIPPED;
6272 			hdr->frame_control = __cpu_to_le16(fc &
6273 					     ~IEEE80211_FCTL_PROTECTED);
6274 		}
6275 	}
6276 
6277 	if (ieee80211_is_beacon(hdr->frame_control))
6278 		ath12k_mac_handle_beacon(ar, skb);
6279 
6280 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6281 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
6282 		   skb, skb->len,
6283 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6284 
6285 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6286 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6287 		   status->freq, status->band, status->signal,
6288 		   status->rate_idx);
6289 
6290 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
6291 
6292 exit:
6293 	rcu_read_unlock();
6294 }
6295 
6296 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6297 {
6298 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6299 	struct ath12k *ar;
6300 
6301 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6302 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
6303 		return;
6304 	}
6305 
6306 	rcu_read_lock();
6307 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6308 	if (!ar) {
6309 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6310 			    tx_compl_param.pdev_id);
6311 		goto exit;
6312 	}
6313 
6314 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6315 				 le32_to_cpu(tx_compl_param.status));
6316 
6317 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6318 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6319 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
6320 		   tx_compl_param.status);
6321 
6322 exit:
6323 	rcu_read_unlock();
6324 }
6325 
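/* Find the ath12k instance whose scan matches the given vdev id and
 * scan state. Walks the active pdevs (caller must hold the RCU read
 * lock), taking each ar's data_lock only for the state check.
 */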
6326 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
6327 						  u32 vdev_id,
6328 						  enum ath12k_scan_state state)
6329 {
6330 	int i;
6331 	struct ath12k_pdev *pdev;
6332 	struct ath12k *ar;
6333 
6334 	for (i = 0; i < ab->num_radios; i++) {
6335 		pdev = rcu_dereference(ab->pdevs_active[i]);
6336 		if (pdev && pdev->ar) {
6337 			ar = pdev->ar;
6338 
6339 			spin_lock_bh(&ar->data_lock);
6340 			if (ar->scan.state == state &&
6341 			    ar->scan.vdev_id == vdev_id) {
6342 				spin_unlock_bh(&ar->data_lock);
6343 				return ar;
6344 			}
6345 			spin_unlock_bh(&ar->data_lock);
6346 		}
6347 	}
6348 	return NULL;
6349 }
6350 
6351 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
6352 {
6353 	struct ath12k *ar;
6354 	struct wmi_scan_event scan_ev = {0};
6355 
6356 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6357 		ath12k_warn(ab, "failed to extract scan event");
6358 		return;
6359 	}
6360 
6361 	rcu_read_lock();
6362 
6363 	/* If the scan was cancelled, e.g. during interface teardown,
6364 	 * the interface will not be found among the active interfaces.
6365 	 * In such scenarios, instead iterate over the active pdevs and
6366 	 * search for the 'ar' whose scan is ABORTING and whose aborting
6367 	 * scan's vdev id matches this event's info.
6368 	 */
6369 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
6370 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
6371 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6372 						 ATH12K_SCAN_ABORTING);
6373 		if (!ar)
6374 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6375 							 ATH12K_SCAN_RUNNING);
6376 	} else {
6377 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
6378 	}
6379 
6380 	if (!ar) {
6381 		ath12k_warn(ab, "Received scan event for unknown vdev");
6382 		rcu_read_unlock();
6383 		return;
6384 	}
6385 
6386 	spin_lock_bh(&ar->data_lock);
6387 
6388 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6389 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
6390 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
6391 						  le32_to_cpu(scan_ev.reason)),
6392 		   le32_to_cpu(scan_ev.event_type),
6393 		   le32_to_cpu(scan_ev.reason),
6394 		   le32_to_cpu(scan_ev.channel_freq),
6395 		   le32_to_cpu(scan_ev.scan_req_id),
6396 		   le32_to_cpu(scan_ev.scan_id),
6397 		   le32_to_cpu(scan_ev.vdev_id),
6398 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
6399 
6400 	switch (le32_to_cpu(scan_ev.event_type)) {
6401 	case WMI_SCAN_EVENT_STARTED:
6402 		ath12k_wmi_event_scan_started(ar);
6403 		break;
6404 	case WMI_SCAN_EVENT_COMPLETED:
6405 		ath12k_wmi_event_scan_completed(ar);
6406 		break;
6407 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6408 		ath12k_wmi_event_scan_bss_chan(ar);
6409 		break;
6410 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6411 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
6412 		break;
6413 	case WMI_SCAN_EVENT_START_FAILED:
6414 		ath12k_warn(ab, "received scan start failure event\n");
6415 		ath12k_wmi_event_scan_start_failed(ar);
6416 		break;
6417 	case WMI_SCAN_EVENT_DEQUEUED:
6418 		__ath12k_mac_scan_finish(ar);
6419 		break;
6420 	case WMI_SCAN_EVENT_PREEMPTED:
6421 	case WMI_SCAN_EVENT_RESTARTED:
6422 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6423 	default:
6424 		break;
6425 	}
6426 
6427 	spin_unlock_bh(&ar->data_lock);
6428 
6429 	rcu_read_unlock();
6430 }
6431 
6432 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6433 {
6434 	struct wmi_peer_sta_kickout_arg arg = {};
6435 	struct ieee80211_sta *sta;
6436 	struct ath12k_peer *peer;
6437 	struct ath12k *ar;
6438 
6439 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6440 		ath12k_warn(ab, "failed to extract peer sta kickout event");
6441 		return;
6442 	}
6443 
6444 	rcu_read_lock();
6445 
6446 	spin_lock_bh(&ab->base_lock);
6447 
6448 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6449 
6450 	if (!peer) {
6451 		ath12k_warn(ab, "peer not found %pM\n",
6452 			    arg.mac_addr);
6453 		goto exit;
6454 	}
6455 
6456 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6457 	if (!ar) {
6458 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6459 			    peer->vdev_id);
6460 		goto exit;
6461 	}
6462 
6463 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6464 					   arg.mac_addr, NULL);
6465 	if (!sta) {
6466 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6467 			    arg.mac_addr);
6468 		goto exit;
6469 	}
6470 
6471 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6472 		   arg.mac_addr);
6473 
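	/* report a nominal count of lost frames so mac80211 runs its
	 * low-ack handling for this station
	 */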
6474 	ieee80211_report_low_ack(sta, 10);
6475 
6476 exit:
6477 	spin_unlock_bh(&ab->base_lock);
6478 	rcu_read_unlock();
6479 }
6480 
6481 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6482 {
6483 	struct wmi_roam_event roam_ev = {};
6484 	struct ath12k *ar;
6485 	u32 vdev_id;
6486 	u8 roam_reason;
6487 
6488 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6489 		ath12k_warn(ab, "failed to extract roam event");
6490 		return;
6491 	}
6492 
6493 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
6494 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6495 				   WMI_ROAM_REASON_MASK);
6496 
6497 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6498 		   "wmi roam event vdev %u reason %d rssi %d\n",
6499 		   vdev_id, roam_reason, roam_ev.rssi);
6500 
6501 	rcu_read_lock();
6502 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6503 	if (!ar) {
6504 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6505 		rcu_read_unlock();
6506 		return;
6507 	}
6508 
6509 	if (roam_reason >= WMI_ROAM_REASON_MAX)
6510 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6511 			    roam_reason, vdev_id);
6512 
6513 	switch (roam_reason) {
6514 	case WMI_ROAM_REASON_BEACON_MISS:
6515 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
6516 		break;
6517 	case WMI_ROAM_REASON_BETTER_AP:
6518 	case WMI_ROAM_REASON_LOW_RSSI:
6519 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6520 	case WMI_ROAM_REASON_HO_FAILED:
6521 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6522 			    roam_reason, vdev_id);
6523 		break;
6524 	}
6525 
6526 	rcu_read_unlock();
6527 }
6528 
6529 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6530 {
6531 	struct wmi_chan_info_event ch_info_ev = {0};
6532 	struct ath12k *ar;
6533 	struct survey_info *survey;
6534 	int idx;
6535 	/* HW channel counters frequency value in hertz */
6536 	u32 cc_freq_hz = ab->cc_freq_hz;
6537 
6538 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6539 		ath12k_warn(ab, "failed to extract chan info event");
6540 		return;
6541 	}
6542 
6543 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6544 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6545 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6546 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6547 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6548 		   ch_info_ev.mac_clk_mhz);
6549 
6550 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6551 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6552 		return;
6553 	}
6554 
6555 	rcu_read_lock();
6556 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6557 	if (!ar) {
6558 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6559 			    ch_info_ev.vdev_id);
6560 		rcu_read_unlock();
6561 		return;
6562 	}
6563 	spin_lock_bh(&ar->data_lock);
6564 
6565 	switch (ar->scan.state) {
6566 	case ATH12K_SCAN_IDLE:
6567 	case ATH12K_SCAN_STARTING:
6568 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6569 		goto exit;
6570 	case ATH12K_SCAN_RUNNING:
6571 	case ATH12K_SCAN_ABORTING:
6572 		break;
6573 	}
6574 
6575 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6576 	if (idx >= ARRAY_SIZE(ar->survey)) {
6577 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6578 			    ch_info_ev.freq, idx);
6579 		goto exit;
6580 	}
6581 
6582 	/* If the FW provides the MAC clock frequency in MHz, override the
6583 	 * initialized HW channel counters frequency value.
6584 	 */
6585 	if (ch_info_ev.mac_clk_mhz)
6586 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6587 
6588 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
6589 		survey = &ar->survey[idx];
6590 		memset(survey, 0, sizeof(*survey));
6591 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6592 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6593 				 SURVEY_INFO_TIME_BUSY;
6594 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6595 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6596 					    cc_freq_hz);
6597 	}
6598 exit:
6599 	spin_unlock_bh(&ar->data_lock);
6600 	rcu_read_unlock();
6601 }
6602 
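/* The BSS channel info counters are reported as 32-bit high/low word
 * pairs; reassemble them into 64-bit values before updating the survey.
 */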
6603 static void
6604 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6605 {
6606 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6607 	struct survey_info *survey;
6608 	struct ath12k *ar;
6609 	u32 cc_freq_hz = ab->cc_freq_hz;
6610 	u64 busy, total, tx, rx, rx_bss;
6611 	int idx;
6612 
6613 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6614 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6615 		return;
6616 	}
6617 
6618 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6619 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6620 
6621 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6622 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6623 
6624 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6625 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6626 
6627 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6628 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6629 
6630 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6631 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6632 
6633 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6634 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6635 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6636 		   bss_ch_info_ev.noise_floor, busy, total,
6637 		   tx, rx, rx_bss);
6638 
6639 	rcu_read_lock();
6640 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6641 
6642 	if (!ar) {
6643 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6644 			    bss_ch_info_ev.pdev_id);
6645 		rcu_read_unlock();
6646 		return;
6647 	}
6648 
6649 	spin_lock_bh(&ar->data_lock);
6650 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6651 	if (idx >= ARRAY_SIZE(ar->survey)) {
6652 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6653 			    bss_ch_info_ev.freq, idx);
6654 		goto exit;
6655 	}
6656 
6657 	survey = &ar->survey[idx];
6658 
6659 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6660 	survey->time      = div_u64(total, cc_freq_hz);
6661 	survey->time_busy = div_u64(busy, cc_freq_hz);
6662 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6663 	survey->time_tx   = div_u64(tx, cc_freq_hz);
6664 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6665 			     SURVEY_INFO_TIME |
6666 			     SURVEY_INFO_TIME_BUSY |
6667 			     SURVEY_INFO_TIME_RX |
6668 			     SURVEY_INFO_TIME_TX);
6669 exit:
6670 	spin_unlock_bh(&ar->data_lock);
6671 	complete(&ar->bss_survey_done);
6672 
6673 	rcu_read_unlock();
6674 }
6675 
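/* Records the install-key status and completes ar->install_key_done so
 * the caller blocked on key installation (presumably the mac80211
 * set_key path) can continue.
 */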
6676 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6677 						struct sk_buff *skb)
6678 {
6679 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6680 	struct ath12k *ar;
6681 
6682 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6683 		ath12k_warn(ab, "failed to extract install key compl event");
6684 		return;
6685 	}
6686 
6687 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6688 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6689 		   install_key_compl.key_idx, install_key_compl.key_flags,
6690 		   install_key_compl.macaddr, install_key_compl.status);
6691 
6692 	rcu_read_lock();
6693 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6694 	if (!ar) {
6695 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6696 			    install_key_compl.vdev_id);
6697 		rcu_read_unlock();
6698 		return;
6699 	}
6700 
6701 	ar->install_key_status = 0;
6702 
6703 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6704 		ath12k_warn(ab, "install key failed for %pM status %d\n",
6705 			    install_key_compl.macaddr, install_key_compl.status);
6706 		ar->install_key_status = install_key_compl.status;
6707 	}
6708 
6709 	complete(&ar->install_key_done);
6710 	rcu_read_unlock();
6711 }
6712 
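/* Both service bitmap TLVs are arrays of 32-bit words with one service
 * flag per bit: i walks the words while j tracks the absolute service id,
 * so the inner loop consumes exactly 32 bits per word.
 */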
6713 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6714 					  u16 tag, u16 len,
6715 					  const void *ptr,
6716 					  void *data)
6717 {
6718 	const struct wmi_service_available_event *ev;
6719 	__le32 *wmi_ext2_service_bitmap;
6720 	int i, j;
6721 	u16 expected_len;
6722 
6723 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6724 	if (len < expected_len) {
6725 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
6726 			    len, tag);
6727 		return -EINVAL;
6728 	}
6729 
6730 	switch (tag) {
6731 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
6732 		ev = (struct wmi_service_available_event *)ptr;
6733 		for (i = 0, j = WMI_MAX_SERVICE;
6734 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6735 		     i++) {
6736 			do {
6737 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6738 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6739 					set_bit(j, ab->wmi_ab.svc_map);
6740 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6741 		}
6742 
6743 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6744 			   "wmi_ext_service_bitmap 0x%08x 0x%08x 0x%08x 0x%08x",
6745 			   le32_to_cpu(ev->wmi_service_segment_bitmap[0]),
6746 			   le32_to_cpu(ev->wmi_service_segment_bitmap[1]),
6747 			   le32_to_cpu(ev->wmi_service_segment_bitmap[2]),
6748 			   le32_to_cpu(ev->wmi_service_segment_bitmap[3]));
6749 		break;
6750 	case WMI_TAG_ARRAY_UINT32:
6751 		wmi_ext2_service_bitmap = (__le32 *)ptr;
6752 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
6753 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
6754 		     i++) {
6755 			do {
6756 				if (le32_to_cpu(wmi_ext2_service_bitmap[i]) &
6757 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6758 					set_bit(j, ab->wmi_ab.svc_map);
6759 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6760 		}
6761 
6762 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6763 			   "wmi_ext2_service_bitmap 0x%08x 0x%08x 0x%08x 0x%08x",
6764 			   le32_to_cpu(wmi_ext2_service_bitmap[0]), le32_to_cpu(wmi_ext2_service_bitmap[1]),
6765 			   le32_to_cpu(wmi_ext2_service_bitmap[2]), le32_to_cpu(wmi_ext2_service_bitmap[3]));
6766 		break;
6767 	}
6768 	return 0;
6769 }
6770 
6771 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
6772 {
6773 	int ret;
6774 
6775 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6776 				  ath12k_wmi_tlv_services_parser,
6777 				  NULL);
6778 	return ret;
6779 }
6780 
6781 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6782 {
6783 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6784 	struct ath12k *ar;
6785 
6786 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6787 		ath12k_warn(ab, "failed to extract peer assoc conf event");
6788 		return;
6789 	}
6790 
6791 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6792 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
6793 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6794 
6795 	rcu_read_lock();
6796 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6797 
6798 	if (!ar) {
6799 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6800 			    peer_assoc_conf.vdev_id);
6801 		rcu_read_unlock();
6802 		return;
6803 	}
6804 
6805 	complete(&ar->peer_assoc_done);
6806 	rcu_read_unlock();
6807 }
6808 
6809 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
6810 {
6811 }
6812 
6813 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from the FW when the scanned
6814  * frequency is not part of the BDF CTL (Conformance Test Limits) table entries.
6815  */
6816 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
6817 						 struct sk_buff *skb)
6818 {
6819 	const void **tb;
6820 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
6821 	int ret;
6822 
6823 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6824 	if (IS_ERR(tb)) {
6825 		ret = PTR_ERR(tb);
6826 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6827 		return;
6828 	}
6829 
6830 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
6831 	if (!ev) {
6832 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
6833 		kfree(tb);
6834 		return;
6835 	}
6836 
6837 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6838 		   "pdev ctl failsafe check ev status %d\n",
6839 		   ev->ctl_failsafe_status);
6840 
6841 	/* If ctl_failsafe_status is set to 1, the FW caps the transmit power
6842 	 * at 10 dBm; otherwise the CTL power entry in the BDF is picked up.
6843 	 */
6844 	if (ev->ctl_failsafe_status != 0)
6845 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
6846 			    ev->ctl_failsafe_status);
6847 
6848 	kfree(tb);
6849 }
6850 
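/* The FW decrements the CSA switch count in each beacon it sends; once
 * the count reaches zero the host is expected to finalize the channel
 * switch via ieee80211_csa_finish().
 */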
6851 static void
6852 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
6853 					  const struct ath12k_wmi_pdev_csa_event *ev,
6854 					  const u32 *vdev_ids)
6855 {
6856 	int i;
6857 	struct ieee80211_bss_conf *conf;
6858 	struct ath12k_link_vif *arvif;
6859 	struct ath12k_vif *ahvif;
6860 
6861 	/* Finish CSA once the switch count reaches zero */
6862 	if (ev->current_switch_count)
6863 		return;
6864 
6865 	rcu_read_lock();
6866 	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
6867 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
6868 
6869 		if (!arvif) {
6870 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
6871 				    vdev_ids[i]);
6872 			continue;
6873 		}
6874 		ahvif = arvif->ahvif;
6875 
6876 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
6877 			ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
6878 				    arvif->link_id);
6879 			continue;
6880 		}
6881 
6882 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
6883 		if (!conf) {
6884 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
6885 				    ahvif->vif->addr, arvif->link_id);
6886 			continue;
6887 		}
6888 
6889 		if (arvif->is_up && conf->csa_active)
6890 			ieee80211_csa_finish(ahvif->vif, 0);
6891 	}
6892 	rcu_read_unlock();
6893 }
6894 
6895 static void
6896 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
6897 					      struct sk_buff *skb)
6898 {
6899 	const void **tb;
6900 	const struct ath12k_wmi_pdev_csa_event *ev;
6901 	const u32 *vdev_ids;
6902 	int ret;
6903 
6904 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6905 	if (IS_ERR(tb)) {
6906 		ret = PTR_ERR(tb);
6907 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6908 		return;
6909 	}
6910 
6911 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
6912 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
6913 
6914 	if (!ev || !vdev_ids) {
6915 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
6916 		kfree(tb);
6917 		return;
6918 	}
6919 
6920 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6921 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
6922 		   ev->current_switch_count, ev->pdev_id,
6923 		   ev->num_vdevs);
6924 
6925 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
6926 
6927 	kfree(tb);
6928 }
6929 
6930 static void
6931 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
6932 {
6933 	const void **tb;
6934 	const struct ath12k_wmi_pdev_radar_event *ev;
6935 	struct ath12k *ar;
6936 	int ret;
6937 
6938 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6939 	if (IS_ERR(tb)) {
6940 		ret = PTR_ERR(tb);
6941 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6942 		return;
6943 	}
6944 
6945 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
6946 
6947 	if (!ev) {
6948 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
6949 		kfree(tb);
6950 		return;
6951 	}
6952 
6953 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6954 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
6955 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
6956 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
6957 		   ev->freq_offset, ev->sidx);
6958 
6959 	rcu_read_lock();
6960 
6961 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
6962 
6963 	if (!ar) {
6964 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
6965 			    ev->pdev_id);
6966 		goto exit;
6967 	}
6968 
6969 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
6970 		   ev->pdev_id);
6971 
6972 	if (ar->dfs_block_radar_events)
6973 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
6974 	else
6975 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
6976 
6977 exit:
6978 	rcu_read_unlock();
6979 
6980 	kfree(tb);
6981 }
6982 
6983 static void
6984 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
6985 				  struct sk_buff *skb)
6986 {
6987 	struct ath12k *ar;
6988 	struct wmi_pdev_temperature_event ev = {0};
6989 
6990 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
6991 		ath12k_warn(ab, "failed to extract pdev temperature event");
6992 		return;
6993 	}
6994 
6995 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6996 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
6997 
6998 	rcu_read_lock();
6999 
7000 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
7001 	if (!ar) {
7002 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
7003 		goto exit;
7004 	}
7005 
7006 exit:
7007 	rcu_read_unlock();
7008 }
7009 
7010 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
7011 					struct sk_buff *skb)
7012 {
7013 	const void **tb;
7014 	const struct wmi_fils_discovery_event *ev;
7015 	int ret;
7016 
7017 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7018 	if (IS_ERR(tb)) {
7019 		ret = PTR_ERR(tb);
7020 		ath12k_warn(ab,
7021 			    "failed to parse FILS discovery event tlv %d\n",
7022 			    ret);
7023 		return;
7024 	}
7025 
7026 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
7027 	if (!ev) {
7028 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
7029 		kfree(tb);
7030 		return;
7031 	}
7032 
7033 	ath12k_warn(ab,
7034 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
7035 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
7036 
7037 	kfree(tb);
7038 }
7039 
7040 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
7041 					      struct sk_buff *skb)
7042 {
7043 	const void **tb;
7044 	const struct wmi_probe_resp_tx_status_event *ev;
7045 	int ret;
7046 
7047 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7048 	if (IS_ERR(tb)) {
7049 		ret = PTR_ERR(tb);
7050 		ath12k_warn(ab,
7051 			    "failed to parse probe response transmission status event tlv: %d\n",
7052 			    ret);
7053 		return;
7054 	}
7055 
7056 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
7057 	if (!ev) {
7058 		ath12k_warn(ab,
7059 			    "failed to fetch probe response transmission status event");
7060 		kfree(tb);
7061 		return;
7062 	}
7063 
7064 	if (ev->tx_status)
7065 		ath12k_warn(ab,
7066 			    "Probe response transmission failed for vdev_id %u, status %u\n",
7067 			    ev->vdev_id, ev->tx_status);
7068 
7069 	kfree(tb);
7070 }
7071 
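/* P2P Notice of Absence updates from the FW are handed to the P2P code
 * so the NoA attribute advertised in beacons can be refreshed.
 */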
7072 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
7073 				    struct sk_buff *skb)
7074 {
7075 	const void **tb;
7076 	const struct wmi_p2p_noa_event *ev;
7077 	const struct ath12k_wmi_p2p_noa_info *noa;
7078 	struct ath12k *ar;
7079 	int ret, vdev_id;
7080 
7081 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7082 	if (IS_ERR(tb)) {
7083 		ret = PTR_ERR(tb);
7084 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
7085 		return ret;
7086 	}
7087 
7088 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
7089 	noa = tb[WMI_TAG_P2P_NOA_INFO];
7090 
7091 	if (!ev || !noa) {
7092 		ret = -EPROTO;
7093 		goto out;
7094 	}
7095 
7096 	vdev_id = __le32_to_cpu(ev->vdev_id);
7097 
7098 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7099 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
7100 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
7101 
7102 	rcu_read_lock();
7103 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7104 	if (!ar) {
7105 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
7106 			    vdev_id);
7107 		ret = -EINVAL;
7108 		goto unlock;
7109 	}
7110 
7111 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
7112 
7113 	ret = 0;
7114 
7115 unlock:
7116 	rcu_read_unlock();
7117 out:
7118 	kfree(tb);
7119 	return ret;
7120 }
7121 
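/* Cache the new hardware rfkill GPIO state under base_lock and defer the
 * actual radio on/off handling to rfkill_work.
 */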
7122 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
7123 					     struct sk_buff *skb)
7124 {
7125 	const struct wmi_rfkill_state_change_event *ev;
7126 	const void **tb;
7127 	int ret;
7128 
7129 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7130 	if (IS_ERR(tb)) {
7131 		ret = PTR_ERR(tb);
7132 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7133 		return;
7134 	}
7135 
7136 	ev = tb[WMI_TAG_RFKILL_EVENT];
7137 	if (!ev) {
7138 		kfree(tb);
7139 		return;
7140 	}
7141 
7142 	ath12k_dbg(ab, ATH12K_DBG_MAC,
7143 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
7144 		   le32_to_cpu(ev->gpio_pin_num),
7145 		   le32_to_cpu(ev->int_type),
7146 		   le32_to_cpu(ev->radio_state));
7147 
7148 	spin_lock_bh(&ab->base_lock);
7149 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
7150 	spin_unlock_bh(&ab->base_lock);
7151 
7152 	queue_work(ab->workqueue, &ab->rfkill_work);
7153 	kfree(tb);
7154 }
7155 
7156 static void
7157 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
7158 {
7159 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
7160 }
7161 
7162 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
7163 					struct sk_buff *skb)
7164 {
7165 	const void **tb;
7166 	const struct wmi_twt_enable_event *ev;
7167 	int ret;
7168 
7169 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7170 	if (IS_ERR(tb)) {
7171 		ret = PTR_ERR(tb);
7172 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
7173 			    ret);
7174 		return;
7175 	}
7176 
7177 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
7178 	if (!ev) {
7179 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
7180 		goto exit;
7181 	}
7182 
7183 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
7184 		   le32_to_cpu(ev->pdev_id),
7185 		   le32_to_cpu(ev->status));
7186 
7187 exit:
7188 	kfree(tb);
7189 }
7190 
7191 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
7192 					 struct sk_buff *skb)
7193 {
7194 	const void **tb;
7195 	const struct wmi_twt_disable_event *ev;
7196 	int ret;
7197 
7198 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7199 	if (IS_ERR(tb)) {
7200 		ret = PTR_ERR(tb);
7201 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
7202 			    ret);
7203 		return;
7204 	}
7205 
7206 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
7207 	if (!ev) {
7208 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
7209 		goto exit;
7210 	}
7211 
7212 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
7213 		   le32_to_cpu(ev->pdev_id),
7214 		   le32_to_cpu(ev->status));
7215 
7216 exit:
7217 	kfree(tb);
7218 }
7219 
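/* For WOW_REASON_PAGE_FAULT wakeups the FW appends a byte-array TLV whose
 * leading __le32 gives the payload length, followed by the packet that
 * caused the page fault.
 */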
7220 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
7221 					    u16 tag, u16 len,
7222 					    const void *ptr, void *data)
7223 {
7224 	const struct wmi_wow_ev_pg_fault_param *pf_param;
7225 	const struct wmi_wow_ev_param *param;
7226 	struct wmi_wow_ev_arg *arg = data;
7227 	int pf_len;
7228 
7229 	switch (tag) {
7230 	case WMI_TAG_WOW_EVENT_INFO:
7231 		param = ptr;
7232 		arg->wake_reason = le32_to_cpu(param->wake_reason);
7233 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
7234 			   arg->wake_reason, wow_reason(arg->wake_reason));
7235 		break;
7236 
7237 	case WMI_TAG_ARRAY_BYTE:
7238 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
7239 			pf_param = ptr;
7240 			pf_len = le32_to_cpu(pf_param->len);
7241 			if (pf_len < 0 ||
7242 			    pf_len > len - sizeof(pf_param->len)) {
7243 				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
7244 					    pf_len);
7245 				return -EINVAL;
7246 			}
7247 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
7248 				   pf_len);
7249 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
7250 					"wow_reason_page_fault packet present",
7251 					"wow_pg_fault ",
7252 					pf_param->data,
7253 					pf_len);
7254 		}
7255 		break;
7256 	default:
7257 		break;
7258 	}
7259 
7260 	return 0;
7261 }
7262 
7263 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
7264 {
7265 	struct wmi_wow_ev_arg arg = { };
7266 	int ret;
7267 
7268 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7269 				  ath12k_wmi_wow_wakeup_host_parse,
7270 				  &arg);
7271 	if (ret) {
7272 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
7273 			    ret);
7274 		return;
7275 	}
7276 
7277 	complete(&ab->wow.wakeup_completed);
7278 }
7279 
7280 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
7281 						struct sk_buff *skb)
7282 {
7283 	const struct wmi_gtk_offload_status_event *ev;
7284 	struct ath12k_link_vif *arvif;
7285 	__be64 replay_ctr_be;
7286 	u64 replay_ctr;
7287 	const void **tb;
7288 	int ret;
7289 
7290 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7291 	if (IS_ERR(tb)) {
7292 		ret = PTR_ERR(tb);
7293 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7294 		return;
7295 	}
7296 
7297 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
7298 	if (!ev) {
7299 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
7300 		kfree(tb);
7301 		return;
7302 	}
7303 
7304 	rcu_read_lock();
7305 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
7306 	if (!arvif) {
7307 		rcu_read_unlock();
7308 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
7309 			    le32_to_cpu(ev->vdev_id));
7310 		kfree(tb);
7311 		return;
7312 	}
7313 
7314 	replay_ctr = le64_to_cpu(ev->replay_ctr);
7315 	arvif->rekey_data.replay_ctr = replay_ctr;
7316 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
7317 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
7318 
7319 	/* supplicant expects big-endian replay counter */
7320 	replay_ctr_be = cpu_to_be64(replay_ctr);
7321 
7322 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
7323 				   (void *)&replay_ctr_be, GFP_ATOMIC);
7324 
7325 	rcu_read_unlock();
7326 
7327 	kfree(tb);
7328 }
7329 
7330 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
7331 {
7332 	struct wmi_cmd_hdr *cmd_hdr;
7333 	enum wmi_tlv_event_id id;
7334 
7335 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
7336 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
7337 
7338 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
7339 		goto out;
7340 
7341 	switch (id) {
7342 		/* Process all the WMI events here */
7343 	case WMI_SERVICE_READY_EVENTID:
7344 		ath12k_service_ready_event(ab, skb);
7345 		break;
7346 	case WMI_SERVICE_READY_EXT_EVENTID:
7347 		ath12k_service_ready_ext_event(ab, skb);
7348 		break;
7349 	case WMI_SERVICE_READY_EXT2_EVENTID:
7350 		ath12k_service_ready_ext2_event(ab, skb);
7351 		break;
7352 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
7353 		ath12k_reg_chan_list_event(ab, skb);
7354 		break;
7355 	case WMI_READY_EVENTID:
7356 		ath12k_ready_event(ab, skb);
7357 		break;
7358 	case WMI_PEER_DELETE_RESP_EVENTID:
7359 		ath12k_peer_delete_resp_event(ab, skb);
7360 		break;
7361 	case WMI_VDEV_START_RESP_EVENTID:
7362 		ath12k_vdev_start_resp_event(ab, skb);
7363 		break;
7364 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
7365 		ath12k_bcn_tx_status_event(ab, skb);
7366 		break;
7367 	case WMI_VDEV_STOPPED_EVENTID:
7368 		ath12k_vdev_stopped_event(ab, skb);
7369 		break;
7370 	case WMI_MGMT_RX_EVENTID:
7371 		ath12k_mgmt_rx_event(ab, skb);
7372 		/* mgmt_rx_event() owns the skb now! */
7373 		return;
7374 	case WMI_MGMT_TX_COMPLETION_EVENTID:
7375 		ath12k_mgmt_tx_compl_event(ab, skb);
7376 		break;
7377 	case WMI_SCAN_EVENTID:
7378 		ath12k_scan_event(ab, skb);
7379 		break;
7380 	case WMI_PEER_STA_KICKOUT_EVENTID:
7381 		ath12k_peer_sta_kickout_event(ab, skb);
7382 		break;
7383 	case WMI_ROAM_EVENTID:
7384 		ath12k_roam_event(ab, skb);
7385 		break;
7386 	case WMI_CHAN_INFO_EVENTID:
7387 		ath12k_chan_info_event(ab, skb);
7388 		break;
7389 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
7390 		ath12k_pdev_bss_chan_info_event(ab, skb);
7391 		break;
7392 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
7393 		ath12k_vdev_install_key_compl_event(ab, skb);
7394 		break;
7395 	case WMI_SERVICE_AVAILABLE_EVENTID:
7396 		ath12k_service_available_event(ab, skb);
7397 		break;
7398 	case WMI_PEER_ASSOC_CONF_EVENTID:
7399 		ath12k_peer_assoc_conf_event(ab, skb);
7400 		break;
7401 	case WMI_UPDATE_STATS_EVENTID:
7402 		ath12k_update_stats_event(ab, skb);
7403 		break;
7404 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
7405 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
7406 		break;
7407 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
7408 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
7409 		break;
7410 	case WMI_PDEV_TEMPERATURE_EVENTID:
7411 		ath12k_wmi_pdev_temperature_event(ab, skb);
7412 		break;
7413 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
7414 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
7415 		break;
7416 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
7417 		ath12k_fils_discovery_event(ab, skb);
7418 		break;
7419 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
7420 		ath12k_probe_resp_tx_status_event(ab, skb);
7421 		break;
7422 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
7423 		ath12k_rfkill_state_change_event(ab, skb);
7424 		break;
7425 	case WMI_TWT_ENABLE_EVENTID:
7426 		ath12k_wmi_twt_enable_event(ab, skb);
7427 		break;
7428 	case WMI_TWT_DISABLE_EVENTID:
7429 		ath12k_wmi_twt_disable_event(ab, skb);
7430 		break;
7431 	case WMI_P2P_NOA_EVENTID:
7432 		ath12k_wmi_p2p_noa_event(ab, skb);
7433 		break;
7434 	/* add Unsupported events here */
7435 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
7436 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
7437 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
7438 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7439 			   "ignoring unsupported event 0x%x\n", id);
7440 		break;
7441 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
7442 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
7443 		break;
7444 	case WMI_VDEV_DELETE_RESP_EVENTID:
7445 		ath12k_vdev_delete_resp_event(ab, skb);
7446 		break;
7447 	case WMI_DIAG_EVENTID:
7448 		ath12k_wmi_diag_event(ab, skb);
7449 		break;
7450 	case WMI_WOW_WAKEUP_HOST_EVENTID:
7451 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
7452 		break;
7453 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
7454 		ath12k_wmi_gtk_offload_status_event(ab, skb);
7455 		break;
7456 	/* TODO: Add remaining events */
7457 	default:
7458 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
7459 		break;
7460 	}
7461 
7462 out:
7463 	dev_kfree_skb(skb);
7464 }
7465 
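/* Each pdev connects its own WMI control HTC service (MAC0/MAC1/MAC2);
 * all endpoints share the same TX/RX completion and credit callbacks.
 */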
7466 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
7467 					   u32 pdev_idx)
7468 {
7469 	int status;
7470 	static const u32 svc_id[] = {
7471 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
7472 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
7473 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
7474 	};
7475 	struct ath12k_htc_svc_conn_req conn_req = {};
7476 	struct ath12k_htc_svc_conn_resp conn_resp = {};
7477 
7478 	/* these fields are the same for all service endpoints */
7479 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
7480 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
7481 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
7482 
7483 	/* connect to control service */
7484 	conn_req.service_id = svc_id[pdev_idx];
7485 
7486 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
7487 	if (status) {
7488 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
7489 			    status);
7490 		return status;
7491 	}
7492 
7493 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
7494 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
7495 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
7496 
7497 	return 0;
7498 }
7499 
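/* The unit test command is a fixed-param TLV followed by a UINT32 array
 * TLV carrying num_args test arguments.
 */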
7500 static int
7501 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
7502 			      struct wmi_unit_test_cmd ut_cmd,
7503 			      u32 *test_args)
7504 {
7505 	struct ath12k_wmi_pdev *wmi = ar->wmi;
7506 	struct wmi_unit_test_cmd *cmd;
7507 	struct sk_buff *skb;
7508 	struct wmi_tlv *tlv;
7509 	void *ptr;
7510 	u32 *ut_cmd_args;
7511 	int buf_len, arg_len;
7512 	int ret;
7513 	int i;
7514 
7515 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
7516 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
7517 
7518 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
7519 	if (!skb)
7520 		return -ENOMEM;
7521 
7522 	cmd = (struct wmi_unit_test_cmd *)skb->data;
7523 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
7524 						 sizeof(ut_cmd));
7525 
7526 	cmd->vdev_id = ut_cmd.vdev_id;
7527 	cmd->module_id = ut_cmd.module_id;
7528 	cmd->num_args = ut_cmd.num_args;
7529 	cmd->diag_token = ut_cmd.diag_token;
7530 
7531 	ptr = skb->data + sizeof(ut_cmd);
7532 
7533 	tlv = ptr;
7534 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
7535 
7536 	ptr += TLV_HDR_SIZE;
7537 
7538 	ut_cmd_args = ptr;
7539 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
7540 		ut_cmd_args[i] = test_args[i];
7541 
7542 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7543 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
7544 		   cmd->module_id, cmd->vdev_id, cmd->num_args,
7545 		   cmd->diag_token);
7546 
7547 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
7548 
7549 	if (ret) {
7550 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
7551 			    ret);
7552 		dev_kfree_skb(skb);
7553 	}
7554 
7555 	return ret;
7556 }
7557 
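/* Radar simulation is driven through the generic WMI unit test command
 * and needs an AP vdev that has already been started.
 */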
7558 int ath12k_wmi_simulate_radar(struct ath12k *ar)
7559 {
7560 	struct ath12k_link_vif *arvif;
7561 	u32 dfs_args[DFS_MAX_TEST_ARGS];
7562 	struct wmi_unit_test_cmd wmi_ut;
7563 	bool arvif_found = false;
7564 
7565 	list_for_each_entry(arvif, &ar->arvifs, list) {
7566 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
7567 			arvif_found = true;
7568 			break;
7569 		}
7570 	}
7571 
7572 	if (!arvif_found)
7573 		return -EINVAL;
7574 
7575 	dfs_args[DFS_TEST_CMDID] = 0;
7576 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
7577 	/* Currently we can pass the segment id (b0 - b1), chirp (b2) and
7578 	 * freq offset (b3 - b10) to the unit test. For simulation
7579 	 * purposes these can be set to 0, which is valid.
7580 	 */
7581 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
7582 
7583 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
7584 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
7585 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
7586 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
7587 
7588 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
7589 
7590 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
7591 }
7592 
7593 int ath12k_wmi_connect(struct ath12k_base *ab)
7594 {
7595 	u32 i;
7596 	u8 wmi_ep_count;
7597 
7598 	wmi_ep_count = ab->htc.wmi_ep_count;
7599 	if (wmi_ep_count > ab->hw_params->max_radios)
7600 		return -EINVAL;
7601 
7602 	for (i = 0; i < wmi_ep_count; i++)
7603 		ath12k_connect_pdev_htc_service(ab, i);
7604 
7605 	return 0;
7606 }
7607 
7608 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
7609 {
7610 	if (WARN_ON(pdev_id >= MAX_RADIOS))
7611 		return;
7612 
7613 	/* TODO: Deinit any pdev specific wmi resource */
7614 }
7615 
7616 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
7617 			   u8 pdev_id)
7618 {
7619 	struct ath12k_wmi_pdev *wmi_handle;
7620 
7621 	if (pdev_id >= ab->hw_params->max_radios)
7622 		return -EINVAL;
7623 
7624 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
7625 
7626 	wmi_handle->wmi_ab = &ab->wmi_ab;
7627 
7628 	ab->wmi_ab.ab = ab;
7629 	/* TODO: Init remaining resource specific to pdev */
7630 
7631 	return 0;
7632 }
7633 
7634 int ath12k_wmi_attach(struct ath12k_base *ab)
7635 {
7636 	int ret;
7637 
7638 	ret = ath12k_wmi_pdev_attach(ab, 0);
7639 	if (ret)
7640 		return ret;
7641 
7642 	ab->wmi_ab.ab = ab;
7643 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
7644 
7645 	/* It's overwritten when service_ext_ready is handled */
7646 	if (ab->hw_params->single_pdev_only)
7647 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
7648 
7649 	/* TODO: Init remaining wmi soc resources required */
7650 	init_completion(&ab->wmi_ab.service_ready);
7651 	init_completion(&ab->wmi_ab.unified_ready);
7652 
7653 	return 0;
7654 }
7655 
7656 void ath12k_wmi_detach(struct ath12k_base *ab)
7657 {
7658 	int i;
7659 
7660 	/* TODO: Deinit wmi resource specific to SOC as required */
7661 
7662 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
7663 		ath12k_wmi_pdev_detach(ab, i);
7664 
7665 	ath12k_wmi_free_dbring_caps(ab);
7666 }
7667 
7668 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
7669 {
7670 	struct wmi_hw_data_filter_cmd *cmd;
7671 	struct sk_buff *skb;
7672 	int len;
7673 
7674 	len = sizeof(*cmd);
7675 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7676 
7677 	if (!skb)
7678 		return -ENOMEM;
7679 
7680 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
7681 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
7682 						 sizeof(*cmd));
7683 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
7684 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
7685 
7686 	/* Set all modes in case of disable */
7687 	if (arg->enable)
7688 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
7689 	else
7690 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
7691 
7692 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7693 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
7694 		   arg->enable, arg->hw_filter_bitmap);
7695 
7696 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
7697 }
7698 
7699 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
7700 {
7701 	struct wmi_wow_host_wakeup_cmd *cmd;
7702 	struct sk_buff *skb;
7703 	size_t len;
7704 
7705 	len = sizeof(*cmd);
7706 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7707 	if (!skb)
7708 		return -ENOMEM;
7709 
7710 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
7711 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
7712 						 sizeof(*cmd));
7713 
7714 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
7715 
7716 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
7717 }
7718 
7719 int ath12k_wmi_wow_enable(struct ath12k *ar)
7720 {
7721 	struct wmi_wow_enable_cmd *cmd;
7722 	struct sk_buff *skb;
7723 	int len;
7724 
7725 	len = sizeof(*cmd);
7726 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7727 	if (!skb)
7728 		return -ENOMEM;
7729 
7730 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
7731 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
7732 						 sizeof(*cmd));
7733 
7734 	cmd->enable = cpu_to_le32(1);
7735 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
7736 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
7737 
7738 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
7739 }
7740 
7741 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
7742 				    enum wmi_wow_wakeup_event event,
7743 				    u32 enable)
7744 {
7745 	struct wmi_wow_add_del_event_cmd *cmd;
7746 	struct sk_buff *skb;
7747 	size_t len;
7748 
7749 	len = sizeof(*cmd);
7750 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7751 	if (!skb)
7752 		return -ENOMEM;
7753 
7754 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
7755 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
7756 						 sizeof(*cmd));
7757 	cmd->vdev_id = cpu_to_le32(vdev_id);
7758 	cmd->is_add = cpu_to_le32(enable);
7759 	cmd->event_bitmap = cpu_to_le32((1 << event));
7760 
7761 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
7762 		   wow_wakeup_event(event), enable, vdev_id);
7763 
7764 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
7765 }
7766 
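/* Adds a bitmap wake pattern. Empty array TLVs are emitted for the unused
 * pattern types (IPv4/IPv6 sync, magic, info timeout) since the FW
 * appears to expect the full TLV sequence regardless of pattern type.
 */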
7767 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
7768 			       const u8 *pattern, const u8 *mask,
7769 			       int pattern_len, int pattern_offset)
7770 {
7771 	struct wmi_wow_add_pattern_cmd *cmd;
7772 	struct wmi_wow_bitmap_pattern_params *bitmap;
7773 	struct wmi_tlv *tlv;
7774 	struct sk_buff *skb;
7775 	void *ptr;
7776 	size_t len;
7777 
7778 	len = sizeof(*cmd) +
7779 	      sizeof(*tlv) +			/* array struct */
7780 	      sizeof(*bitmap) +			/* bitmap */
7781 	      sizeof(*tlv) +			/* empty ipv4 sync */
7782 	      sizeof(*tlv) +			/* empty ipv6 sync */
7783 	      sizeof(*tlv) +			/* empty magic */
7784 	      sizeof(*tlv) +			/* empty info timeout */
7785 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
7786 
7787 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7788 	if (!skb)
7789 		return -ENOMEM;
7790 
7791 	/* cmd */
7792 	ptr = skb->data;
7793 	cmd = ptr;
7794 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
7795 						 sizeof(*cmd));
7796 	cmd->vdev_id = cpu_to_le32(vdev_id);
7797 	cmd->pattern_id = cpu_to_le32(pattern_id);
7798 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
7799 
7800 	ptr += sizeof(*cmd);
7801 
7802 	/* bitmap */
7803 	tlv = ptr;
7804 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
7805 
7806 	ptr += sizeof(*tlv);
7807 
7808 	bitmap = ptr;
7809 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
7810 						    sizeof(*bitmap));
7811 	memcpy(bitmap->patternbuf, pattern, pattern_len);
7812 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
7813 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
7814 	bitmap->pattern_len = cpu_to_le32(pattern_len);
7815 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
7816 	bitmap->pattern_id = cpu_to_le32(pattern_id);
7817 
7818 	ptr += sizeof(*bitmap);
7819 
7820 	/* ipv4 sync */
7821 	tlv = ptr;
7822 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7823 
7824 	ptr += sizeof(*tlv);
7825 
7826 	/* ipv6 sync */
7827 	tlv = ptr;
7828 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7829 
7830 	ptr += sizeof(*tlv);
7831 
7832 	/* magic */
7833 	tlv = ptr;
7834 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7835 
7836 	ptr += sizeof(*tlv);
7837 
7838 	/* pattern info timeout */
7839 	tlv = ptr;
7840 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
7841 
7842 	ptr += sizeof(*tlv);
7843 
7844 	/* ratelimit interval */
7845 	tlv = ptr;
7846 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
7847 
7848 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
7849 		   vdev_id, pattern_id, pattern_offset, pattern_len);
7850 
7851 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
7852 			bitmap->patternbuf, pattern_len);
7853 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
7854 			bitmap->bitmaskbuf, pattern_len);
7855 
7856 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
7857 }
7858 
7859 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
7860 {
7861 	struct wmi_wow_del_pattern_cmd *cmd;
7862 	struct sk_buff *skb;
7863 	size_t len;
7864 
7865 	len = sizeof(*cmd);
7866 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7867 	if (!skb)
7868 		return -ENOMEM;
7869 
7870 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
7871 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
7872 						 sizeof(*cmd));
7873 	cmd->vdev_id = cpu_to_le32(vdev_id);
7874 	cmd->pattern_id = cpu_to_le32(pattern_id);
7875 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
7876 
7877 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
7878 		   vdev_id, pattern_id);
7879 
7880 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
7881 }
7882 
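/* Builds the NLO (network list offload) start request: fixed params, an
 * array of per-SSID nlo_configured_params entries and the channel list
 * taken from the first configured network.
 */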
7883 static struct sk_buff *
7884 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
7885 				   struct wmi_pno_scan_req_arg *pno)
7886 {
7887 	struct nlo_configured_params *nlo_list;
7888 	size_t len, nlo_list_len, channel_list_len;
7889 	struct wmi_wow_nlo_config_cmd *cmd;
7890 	__le32 *channel_list;
7891 	struct wmi_tlv *tlv;
7892 	struct sk_buff *skb;
7893 	void *ptr;
7894 	u32 i;
7895 
7896 	len = sizeof(*cmd) +
7897 	      sizeof(*tlv) +
7898 	      /* TLV place holder for array of structures
7899 	       * nlo_configured_params(nlo_list)
7900 	       */
7901 	      sizeof(*tlv);
7902 	      /* TLV place holder for array of uint32 channel_list */
7903 
7904 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
7905 	len += channel_list_len;
7906 
7907 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
7908 	len += nlo_list_len;
7909 
7910 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7911 	if (!skb)
7912 		return ERR_PTR(-ENOMEM);
7913 
7914 	ptr = skb->data;
7915 	cmd = ptr;
7916 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
7917 
7918 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
7919 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
7920 
7921 	/* current FW does not support min-max range for dwell time */
7922 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
7923 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
7924 
7925 	if (pno->do_passive_scan)
7926 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
7927 
7928 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
7929 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
7930 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
7931 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
7932 
7933 	if (pno->enable_pno_scan_randomization) {
7934 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
7935 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
7936 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
7937 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
7938 	}
7939 
7940 	ptr += sizeof(*cmd);
7941 
7942 	/* nlo_configured_params(nlo_list) */
7943 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
7944 	tlv = ptr;
7945 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
7946 
7947 	ptr += sizeof(*tlv);
7948 	nlo_list = ptr;
7949 	for (i = 0; i < pno->uc_networks_count; i++) {
7950 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
7951 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
7952 						     sizeof(*nlo_list));
7953 
7954 		nlo_list[i].ssid.valid = cpu_to_le32(1);
7955 		nlo_list[i].ssid.ssid.ssid_len =
7956 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
7957 		memcpy(nlo_list[i].ssid.ssid.ssid,
7958 		       pno->a_networks[i].ssid.ssid,
7959 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
7960 
7961 		if (pno->a_networks[i].rssi_threshold &&
7962 		    pno->a_networks[i].rssi_threshold > -300) {
7963 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
7964 			nlo_list[i].rssi_cond.rssi =
7965 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
7966 		}
7967 
7968 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
7969 		nlo_list[i].bcast_nw_type.bcast_nw_type =
7970 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
7971 	}
7972 
7973 	ptr += nlo_list_len;
7974 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
7975 	tlv = ptr;
7976 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
7977 	ptr += sizeof(*tlv);
7978 	channel_list = ptr;
7979 
7980 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
7981 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
7982 
7983 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
7984 		   vdev_id);
7985 
7986 	return skb;
7987 }
7988 
7989 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
7990 							 u32 vdev_id)
7991 {
7992 	struct wmi_wow_nlo_config_cmd *cmd;
7993 	struct sk_buff *skb;
7994 	size_t len;
7995 
7996 	len = sizeof(*cmd);
7997 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7998 	if (!skb)
7999 		return ERR_PTR(-ENOMEM);
8000 
8001 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
8002 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
8003 
8004 	cmd->vdev_id = cpu_to_le32(vdev_id);
8005 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
8006 
8007 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8008 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
8009 	return skb;
8010 }
8011 
8012 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
8013 			      struct wmi_pno_scan_req_arg  *pno_scan)
8014 {
8015 	struct sk_buff *skb;
8016 
8017 	if (pno_scan->enable)
8018 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
8019 	else
8020 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
8021 
8022 	if (IS_ERR_OR_NULL(skb))
8023 		return -ENOMEM;
8024 
8025 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
8026 }
8027 
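/* The base command carries exactly WMI_MAX_NS_OFFLOADS NS tuples; any
 * further IPv6 addresses go into an extended tuple array. The ext flag
 * selects which chunk of the address list is filled here.
 */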
8028 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
8029 				       struct wmi_arp_ns_offload_arg *offload,
8030 				       void **ptr,
8031 				       bool enable,
8032 				       bool ext)
8033 {
8034 	struct wmi_ns_offload_params *ns;
8035 	struct wmi_tlv *tlv;
8036 	void *buf_ptr = *ptr;
8037 	u32 ns_cnt, ns_ext_tuples;
8038 	int i, max_offloads;
8039 
8040 	ns_cnt = offload->ipv6_count;
8041 
8042 	tlv  = buf_ptr;
8043 
8044 	if (ext) {
8045 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
8046 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
8047 						 ns_ext_tuples * sizeof(*ns));
8048 		i = WMI_MAX_NS_OFFLOADS;
8049 		max_offloads = offload->ipv6_count;
8050 	} else {
8051 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
8052 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
8053 		i = 0;
8054 		max_offloads = WMI_MAX_NS_OFFLOADS;
8055 	}
8056 
8057 	buf_ptr += sizeof(*tlv);
8058 
8059 	for (; i < max_offloads; i++) {
8060 		ns = buf_ptr;
8061 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
8062 							sizeof(*ns));
8063 
8064 		if (enable) {
8065 			if (i < ns_cnt)
8066 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
8067 
8068 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
8069 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
8070 
8071 			if (offload->ipv6_type[i])
8072 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
8073 
8074 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
8075 
8076 			if (!is_zero_ether_addr(ns->target_mac.addr))
8077 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
8078 
8079 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8080 				   "wmi index %d ns_solicited %pI6 target %pI6",
8081 				   i, ns->solicitation_ipaddr,
8082 				   ns->target_ipaddr[0]);
8083 		}
8084 
8085 		buf_ptr += sizeof(*ns);
8086 	}
8087 
8088 	*ptr = buf_ptr;
8089 }
8090 
8091 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
8092 					struct wmi_arp_ns_offload_arg *offload,
8093 					void **ptr,
8094 					bool enable)
8095 {
8096 	struct wmi_arp_offload_params *arp;
8097 	struct wmi_tlv *tlv;
8098 	void *buf_ptr = *ptr;
8099 	int i;
8100 
8101 	/* fill arp tuple */
8102 	tlv = buf_ptr;
8103 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
8104 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
8105 	buf_ptr += sizeof(*tlv);
8106 
8107 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
8108 		arp = buf_ptr;
8109 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
8110 							 sizeof(*arp));
8111 
8112 		if (enable && i < offload->ipv4_count) {
8113 			/* Copy the target ip addr and flags */
8114 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
8115 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
8116 
8117 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
8118 				   arp->target_ipaddr);
8119 		}
8120 
8121 		buf_ptr += sizeof(*arp);
8122 	}
8123 
8124 	*ptr = buf_ptr;
8125 }
8126 
8127 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
8128 			      struct ath12k_link_vif *arvif,
8129 			      struct wmi_arp_ns_offload_arg *offload,
8130 			      bool enable)
8131 {
8132 	struct wmi_set_arp_ns_offload_cmd *cmd;
8133 	struct wmi_tlv *tlv;
8134 	struct sk_buff *skb;
8135 	void *buf_ptr;
8136 	size_t len;
8137 	u8 ns_cnt, ns_ext_tuples = 0;
8138 
8139 	ns_cnt = offload->ipv6_count;
8140 
8141 	len = sizeof(*cmd) +
8142 	      sizeof(*tlv) +
8143 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
8144 	      sizeof(*tlv) +
8145 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
8146 
8147 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
8148 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
8149 		len += sizeof(*tlv) +
8150 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
8151 	}
8152 
8153 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8154 	if (!skb)
8155 		return -ENOMEM;
8156 
8157 	buf_ptr = skb->data;
8158 	cmd = buf_ptr;
8159 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
8160 						 sizeof(*cmd));
8161 	cmd->flags = cpu_to_le32(0);
8162 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
8163 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
8164 
8165 	buf_ptr += sizeof(*cmd);
8166 
8167 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
8168 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
8169 
8170 	if (ns_ext_tuples)
8171 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
8172 
8173 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
8174 }
8175 
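/* On enable, programs the KCK/KEK material and the last-seen replay
 * counter (little-endian in the command) so the FW can handle GTK
 * rekeying while the host sleeps.
 */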
8176 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
8177 				 struct ath12k_link_vif *arvif, bool enable)
8178 {
8179 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
8180 	struct wmi_gtk_rekey_offload_cmd *cmd;
8181 	struct sk_buff *skb;
8182 	__le64 replay_ctr;
8183 	int len;
8184 
8185 	len = sizeof(*cmd);
8186 	skb =  ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8187 	if (!skb)
8188 		return -ENOMEM;
8189 
8190 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
8191 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
8192 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
8193 
8194 	if (enable) {
8195 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
8196 
8197 		/* the field lengths in rekey_data and cmd are equal */
8198 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
8199 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
8200 
8201 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
8202 		memcpy(cmd->replay_ctr, &replay_ctr,
8203 		       sizeof(replay_ctr));
8204 	} else {
8205 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
8206 	}
8207 
8208 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
8209 		   arvif->vdev_id, enable);
8210 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
8211 }
8212 
8213 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
8214 				 struct ath12k_link_vif *arvif)
8215 {
8216 	struct wmi_gtk_rekey_offload_cmd *cmd;
8217 	struct sk_buff *skb;
8218 	int len;
8219 
8220 	len = sizeof(*cmd);
8221 	skb =  ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8222 	if (!skb)
8223 		return -ENOMEM;
8224 
8225 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
8226 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
8227 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
8228 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
8229 
8230 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
8231 		   arvif->vdev_id);
8232 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
8233 }
8234 
8235 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
8236 			     const struct wmi_sta_keepalive_arg *arg)
8237 {
8238 	struct wmi_sta_keepalive_arp_resp_params *arp;
8239 	struct ath12k_wmi_pdev *wmi = ar->wmi;
8240 	struct wmi_sta_keepalive_cmd *cmd;
8241 	struct sk_buff *skb;
8242 	size_t len;
8243 
8244 	len = sizeof(*cmd) + sizeof(*arp);
8245 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
8246 	if (!skb)
8247 		return -ENOMEM;
8248 
8249 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
8250 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
8251 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
8252 	cmd->enabled = cpu_to_le32(arg->enabled);
8253 	cmd->interval = cpu_to_le32(arg->interval);
8254 	cmd->method = cpu_to_le32(arg->method);
8255 
8256 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
8257 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
8258 						 sizeof(*arp));
8259 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
8260 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
8261 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
8262 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
8263 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
8264 	}
8265 
8266 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8267 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
8268 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
8269 
8270 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
8271 }
8272