xref: /linux/drivers/net/wireless/ath/ath12k/wmi.c (revision 0a7a30fce30e566a462b30994fcf69cea01934ed)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include "core.h"
18 #include "debugfs.h"
19 #include "debug.h"
20 #include "mac.h"
21 #include "hw.h"
22 #include "peer.h"
23 #include "p2p.h"
24 #include "testmode.h"
25 
26 struct ath12k_wmi_svc_ready_parse {
27 	bool wmi_svc_bitmap_done;
28 };
29 
30 struct wmi_tlv_fw_stats_parse {
31 	const struct wmi_stats_event *ev;
32 	struct ath12k_fw_stats *stats;
33 };
34 
35 struct ath12k_wmi_dma_ring_caps_parse {
36 	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
37 	u32 n_dma_ring_caps;
38 };
39 
40 struct ath12k_wmi_service_ext_arg {
41 	u32 default_conc_scan_config_bits;
42 	u32 default_fw_config_bits;
43 	struct ath12k_wmi_ppe_threshold_arg ppet;
44 	u32 he_cap_info;
45 	u32 mpdu_density;
46 	u32 max_bssid_rx_filters;
47 	u32 num_hw_modes;
48 	u32 num_phy;
49 };
50 
51 struct ath12k_wmi_svc_rdy_ext_parse {
52 	struct ath12k_wmi_service_ext_arg arg;
53 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
54 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
55 	u32 n_hw_mode_caps;
56 	u32 tot_phy_id;
57 	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
58 	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
59 	u32 n_mac_phy_caps;
60 	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
61 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
62 	u32 n_ext_hal_reg_caps;
63 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
64 	bool hw_mode_done;
65 	bool mac_phy_done;
66 	bool ext_hal_reg_done;
67 	bool mac_phy_chainmask_combo_done;
68 	bool mac_phy_chainmask_cap_done;
69 	bool oem_dma_ring_cap_done;
70 	bool dma_ring_cap_done;
71 };
72 
73 struct ath12k_wmi_svc_rdy_ext2_arg {
74 	u32 reg_db_version;
75 	u32 hw_min_max_tx_power_2ghz;
76 	u32 hw_min_max_tx_power_5ghz;
77 	u32 chwidth_num_peer_caps;
78 	u32 preamble_puncture_bw;
79 	u32 max_user_per_ppdu_ofdma;
80 	u32 max_user_per_ppdu_mumimo;
81 	u32 target_cap_flags;
82 	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
83 	u32 max_num_linkview_peers;
84 	u32 max_num_msduq_supported_per_tid;
85 	u32 default_num_msduq_supported_per_tid;
86 };
87 
88 struct ath12k_wmi_svc_rdy_ext2_parse {
89 	struct ath12k_wmi_svc_rdy_ext2_arg arg;
90 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
91 	bool dma_ring_cap_done;
92 	bool spectral_bin_scaling_done;
93 	bool mac_phy_caps_ext_done;
94 };
95 
96 struct ath12k_wmi_rdy_parse {
97 	u32 num_extra_mac_addr;
98 };
99 
100 struct ath12k_wmi_dma_buf_release_arg {
101 	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
102 	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
103 	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
104 	u32 num_buf_entry;
105 	u32 num_meta;
106 	bool buf_entry_done;
107 	bool meta_data_done;
108 };
109 
110 struct ath12k_wmi_tlv_policy {
111 	size_t min_len;
112 };
113 
114 struct wmi_tlv_mgmt_rx_parse {
115 	const struct ath12k_wmi_mgmt_rx_params *fixed;
116 	const u8 *frame_buf;
117 	bool frame_buf_done;
118 };
119 
120 static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
121 	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
122 	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
123 	[WMI_TAG_SERVICE_READY_EVENT] = {
124 		.min_len = sizeof(struct wmi_service_ready_event) },
125 	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
126 		.min_len = sizeof(struct wmi_service_ready_ext_event) },
127 	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
128 		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
129 	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
130 		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
131 	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
132 		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
133 	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
134 		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
135 	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
136 		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
137 	[WMI_TAG_VDEV_STOPPED_EVENT] = {
138 		.min_len = sizeof(struct wmi_vdev_stopped_event) },
139 	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
140 		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
141 	[WMI_TAG_MGMT_RX_HDR] = {
142 		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
143 	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
144 		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
145 	[WMI_TAG_SCAN_EVENT] = {
146 		.min_len = sizeof(struct wmi_scan_event) },
147 	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
148 		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
149 	[WMI_TAG_ROAM_EVENT] = {
150 		.min_len = sizeof(struct wmi_roam_event) },
151 	[WMI_TAG_CHAN_INFO_EVENT] = {
152 		.min_len = sizeof(struct wmi_chan_info_event) },
153 	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
154 		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
155 	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
156 		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
157 	[WMI_TAG_READY_EVENT] = {
158 		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
159 	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
160 		.min_len = sizeof(struct wmi_service_available_event) },
161 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
162 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
163 	[WMI_TAG_RFKILL_EVENT] = {
164 		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
165 	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
166 		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
167 	[WMI_TAG_HOST_SWFDA_EVENT] = {
168 		.min_len = sizeof(struct wmi_fils_discovery_event) },
169 	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
170 		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
171 	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
172 		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
173 	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
174 		.min_len = sizeof(struct wmi_twt_enable_event) },
175 	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
176 		.min_len = sizeof(struct wmi_twt_disable_event) },
177 	[WMI_TAG_P2P_NOA_INFO] = {
178 		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
179 	[WMI_TAG_P2P_NOA_EVENT] = {
180 		.min_len = sizeof(struct wmi_p2p_noa_event) },
181 	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
182 		.min_len = sizeof(struct wmi_11d_new_cc_event) },
183 };
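/* Editor's note (illustrative): the policy table above only enforces a
 * minimum length. A WMI_TAG_SCAN_EVENT TLV shorter than
 * sizeof(struct wmi_scan_event) is rejected by ath12k_wmi_tlv_iter()
 * before any tag-specific handler runs; tags without an entry
 * (min_len == 0) pass through unchecked.
 */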
184 
185 __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
186 {
187 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
188 		le32_encode_bits(len, WMI_TLV_LEN);
189 }
190 
191 static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
192 {
193 	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
194 }
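/* Illustrative sketch (not part of the driver): the TLV header packs the
 * tag and the value length into one little-endian u32 via the WMI_TLV_TAG
 * and WMI_TLV_LEN field masks, and the receive path undoes it with
 * le32_get_bits():
 *
 *	__le32 hdr = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 16);
 *	u16 tag = le32_get_bits(hdr, WMI_TLV_TAG);	// WMI_TAG_ARRAY_BYTE
 *	u16 len = le32_get_bits(hdr, WMI_TLV_LEN);	// 16
 *
 * ath12k_wmi_tlv_cmd_hdr() is the variant used for fixed command
 * structures: callers pass sizeof(*cmd), which still includes the TLV
 * header itself, so TLV_HDR_SIZE is subtracted before encoding.
 */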
195 
196 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
197 			     struct ath12k_wmi_resource_config_arg *config)
198 {
199 	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
200 	config->num_peers = ab->num_radios *
201 		ath12k_core_get_max_peers_per_radio(ab);
202 	config->num_tids = ath12k_core_get_max_num_tids(ab);
203 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
204 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
205 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
206 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
207 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
208 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
209 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
210 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
211 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
212 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
213 
214 	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
215 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
216 	else
217 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
218 
219 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
220 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
221 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
222 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
223 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
224 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
225 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
226 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
227 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
228 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
229 	config->rx_skip_defrag_timeout_dup_detection_check =
230 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
231 	config->vow_config = TARGET_VOW_CONFIG;
232 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
233 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
234 	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
235 	config->rx_batchmode = TARGET_RX_BATCHMODE;
236 	/* Indicates that the host supports peer map v3 and unmap v2 */
237 	config->peer_map_unmap_version = 0x32;
238 	config->twt_ap_pdev_count = ab->num_radios;
239 	config->twt_ap_sta_count = 1000;
240 	config->ema_max_vap_cnt = ab->num_radios;
241 	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
242 	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
243 
244 	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
245 		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
246 }
247 
248 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
249 			     struct ath12k_wmi_resource_config_arg *config)
250 {
251 	config->num_vdevs = 4;
252 	config->num_peers = 16;
253 	config->num_tids = 32;
254 
255 	config->num_offload_peers = 3;
256 	config->num_offload_reorder_buffs = 3;
257 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
258 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
259 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
260 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
261 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
262 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
263 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
264 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
265 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
266 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
267 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
268 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
269 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
270 	config->num_mcast_groups = 0;
271 	config->num_mcast_table_elems = 0;
272 	config->mcast2ucast_mode = 0;
273 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
274 	config->num_wds_entries = 0;
275 	config->dma_burst_size = 0;
276 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
277 	config->vow_config = TARGET_VOW_CONFIG;
278 	config->gtk_offload_max_vdev = 2;
279 	config->num_msdu_desc = 0x400;
280 	config->beacon_tx_offload_max_vdev = 2;
281 	config->rx_batchmode = TARGET_RX_BATCHMODE;
282 
283 	config->peer_map_unmap_version = 0x1;
284 	config->use_pdev_id = 1;
285 	config->max_frag_entries = 0xa;
286 	config->num_tdls_vdevs = 0x1;
287 	config->num_tdls_conn_table_entries = 8;
289 	config->num_multicast_filter_entries = 0x20;
290 	config->num_wow_filters = 0x16;
291 	config->num_keep_alive_pattern = 0;
292 }
293 
294 #define PRIMAP(_hw_mode_) \
295 	[_hw_mode_] = _hw_mode_##_PRI
296 
297 static const int ath12k_hw_mode_pri_map[] = {
298 	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
299 	PRIMAP(WMI_HOST_HW_MODE_DBS),
300 	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
301 	PRIMAP(WMI_HOST_HW_MODE_SBS),
302 	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
303 	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
304 	/* keep last */
305 	PRIMAP(WMI_HOST_HW_MODE_MAX),
306 };
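/* Editor's note: PRIMAP() relies on token pasting, so for example
 * PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *
 *	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI
 *
 * i.e. the array maps each hw mode to its priority constant by name.
 */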
307 
308 static int
309 ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
310 		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
311 				const void *ptr, void *data),
312 		    void *data)
313 {
314 	const void *begin = ptr;
315 	const struct wmi_tlv *tlv;
316 	u16 tlv_tag, tlv_len;
317 	int ret;
318 
319 	while (len > 0) {
320 		if (len < sizeof(*tlv)) {
321 			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
322 				   ptr - begin, len, sizeof(*tlv));
323 			return -EINVAL;
324 		}
325 
326 		tlv = ptr;
327 		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
328 		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
329 		ptr += sizeof(*tlv);
330 		len -= sizeof(*tlv);
331 
332 		if (tlv_len > len) {
333 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
334 				   tlv_tag, ptr - begin, len, tlv_len);
335 			return -EINVAL;
336 		}
337 
338 		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
339 		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
340 		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
341 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
342 				   tlv_tag, ptr - begin, tlv_len,
343 				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
344 			return -EINVAL;
345 		}
346 
347 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
348 		if (ret)
349 			return ret;
350 
351 		ptr += tlv_len;
352 		len -= tlv_len;
353 	}
354 
355 	return 0;
356 }
357 
358 static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
359 				     const void *ptr, void *data)
360 {
361 	const void **tb = data;
362 
363 	if (tag < WMI_TAG_MAX)
364 		tb[tag] = ptr;
365 
366 	return 0;
367 }
368 
369 static int ath12k_wmi_tlv_parse(struct ath12k_base *ab, const void **tb,
370 				const void *ptr, size_t len)
371 {
372 	return ath12k_wmi_tlv_iter(ab, ptr, len, ath12k_wmi_tlv_iter_parse,
373 				   (void *)tb);
374 }
375 
376 static const void **
377 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
378 			   struct sk_buff *skb, gfp_t gfp)
379 {
380 	const void **tb;
381 	int ret;
382 
383 	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
384 	if (!tb)
385 		return ERR_PTR(-ENOMEM);
386 
387 	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
388 	if (ret) {
389 		kfree(tb);
390 		return ERR_PTR(ret);
391 	}
392 
393 	return tb;
394 }
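/* Typical usage sketch for the helper above (illustrative only; the event
 * handlers in this file follow this shape):
 *
 *	const struct wmi_scan_event *ev;
 *	const void **tb;
 *
 *	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *
 *	ev = tb[WMI_TAG_SCAN_EVENT];	// NULL if the TLV was absent
 *	if (!ev) {
 *		kfree(tb);
 *		return -EPROTO;
 *	}
 *	...
 *	kfree(tb);
 */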
395 
396 static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
397 				      u32 cmd_id)
398 {
399 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
400 	struct ath12k_base *ab = wmi->wmi_ab->ab;
401 	struct wmi_cmd_hdr *cmd_hdr;
402 	int ret;
403 
404 	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
405 		return -ENOMEM;
406 
407 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
408 	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
409 
410 	memset(skb_cb, 0, sizeof(*skb_cb));
411 	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
412 
413 	if (ret)
414 		goto err_pull;
415 
416 	return 0;
417 
418 err_pull:
419 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
420 	return ret;
421 }
422 
423 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
424 			u32 cmd_id)
425 {
426 	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
427 	int ret = -EOPNOTSUPP;
428 
429 	might_sleep();
430 
431 	wait_event_timeout(wmi_ab->tx_credits_wq, ({
432 		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
433 
434 		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
435 			ret = -ESHUTDOWN;
436 
437 		(ret != -EAGAIN);
438 	}), WMI_SEND_TIMEOUT_HZ);
439 
440 	if (ret == -EAGAIN)
441 		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);
442 
443 	return ret;
444 }
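/* Note on the pattern above: ath12k_wmi_cmd_send_nowait() returns -EAGAIN
 * while HTC tx credits are exhausted, so the wait_event_timeout() condition
 * retries the send each time tx_credits_wq is woken (credits replenished)
 * and only gives up after WMI_SEND_TIMEOUT_HZ, or immediately with
 * -ESHUTDOWN once a crash flush is in progress.
 */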
445 
446 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
447 				     const void *ptr,
448 				     struct ath12k_wmi_service_ext_arg *arg)
449 {
450 	const struct wmi_service_ready_ext_event *ev = ptr;
451 	int i;
452 
453 	if (!ev)
454 		return -EINVAL;
455 
456 	/* Move this to a host-based bitmap */
457 	arg->default_conc_scan_config_bits =
458 		le32_to_cpu(ev->default_conc_scan_config_bits);
459 	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
460 	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
461 	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
462 	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
463 	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
464 	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
465 
466 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
467 		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
468 			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
469 
470 	return 0;
471 }
472 
473 static int
474 ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
475 				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
476 				      u8 hw_mode_id, u8 phy_id,
477 				      struct ath12k_pdev *pdev)
478 {
479 	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
480 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
481 	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
482 	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
483 	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
484 	struct ath12k_band_cap *cap_band;
485 	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
486 	struct ath12k_fw_pdev *fw_pdev;
487 	u32 phy_map;
488 	u32 hw_idx, phy_idx = 0;
489 	int i;
490 
491 	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
492 		return -EINVAL;
493 
494 	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
495 		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
496 			break;
497 
498 		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
499 		phy_idx = fls(phy_map);
500 	}
501 
502 	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
503 		return -EINVAL;
504 
505 	phy_idx += phy_id;
506 	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
507 		return -EINVAL;
508 
509 	mac_caps = wmi_mac_phy_caps + phy_idx;
510 
511 	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
512 	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
513 	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
514 	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
515 
516 	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
517 	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
518 	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
519 	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
520 	ab->fw_pdev_count++;
521 
522 	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
523 	 * from band to band for a single radio, it is not yet clear how this
524 	 * should be handled.
525 	 */
526 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
527 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
528 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
529 	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
530 		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
531 		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
532 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
533 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
534 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
535 	} else {
536 		return -EINVAL;
537 	}
538 
539 	/* The tx/rx chainmask reported by firmware depends on the actual hw
540 	 * chains used. For example, for 4x4-capable macphys, the first 4 chains
541 	 * can be used for the first mac and the remaining 4 chains for the
542 	 * second mac, or vice versa. In this case, tx/rx chainmask 0xf will be
543 	 * advertised for the first mac and 0xf0 for the second, or vice versa.
544 	 * Compute the shift value for the tx/rx chainmask, which will be used
545 	 * to advertise supported ht/vht rates to mac80211.
546 	 */
547 	pdev_cap->tx_chain_mask_shift =
548 			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
549 	pdev_cap->rx_chain_mask_shift =
550 			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
551 
552 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
553 		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
554 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
555 		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
556 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
557 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
558 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
559 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
560 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
561 			cap_band->he_cap_phy_info[i] =
562 				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
563 
564 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
565 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
566 
567 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
568 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
569 				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
570 	}
571 
572 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
573 		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
574 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
575 		cap_band->max_bw_supported =
576 			le32_to_cpu(mac_caps->max_bw_supported_5g);
577 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
578 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
579 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
580 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
581 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
582 			cap_band->he_cap_phy_info[i] =
583 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
584 
585 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
586 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
587 
588 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
589 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
590 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
591 
592 		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
593 		cap_band->max_bw_supported =
594 			le32_to_cpu(mac_caps->max_bw_supported_5g);
595 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
596 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
597 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
598 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
599 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
600 			cap_band->he_cap_phy_info[i] =
601 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
602 
603 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
604 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
605 
606 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
607 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
608 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
609 	}
610 
611 	return 0;
612 }
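/* Editor's worked example for the chainmask shift computed above: a second
 * mac advertising tx/rx chainmask 0xf0 yields find_first_bit(0xf0) == 4,
 * so chainmask-derived rate/NSS information can be shifted down by 4
 * before being reported to mac80211.
 */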
613 
614 static int
615 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
616 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
617 				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
618 				u8 phy_idx,
619 				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
620 {
621 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
622 
623 	if (!reg_caps || !ext_caps)
624 		return -EINVAL;
625 
626 	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
627 		return -EINVAL;
628 
629 	ext_reg_cap = &ext_caps[phy_idx];
630 
631 	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
632 	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
633 	param->eeprom_reg_domain_ext =
634 		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
635 	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
636 	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
637 	/* check if param->wireless_mode is needed */
638 	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
639 	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
640 	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
641 	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
642 
643 	return 0;
644 }
645 
646 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
647 					 const void *evt_buf,
648 					 struct ath12k_wmi_target_cap_arg *cap)
649 {
650 	const struct wmi_service_ready_event *ev = evt_buf;
651 
652 	if (!ev) {
653 		ath12k_err(ab, "%s: failed due to NULL param\n",
654 			   __func__);
655 		return -EINVAL;
656 	}
657 
658 	cap->phy_capability = le32_to_cpu(ev->phy_capability);
659 	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
660 	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
661 	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
662 	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
663 	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
664 	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
665 	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
666 	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
667 	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
668 	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
669 	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
670 	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
671 	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
672 	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
673 	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
674 	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
675 
676 	return 0;
677 }
678 
679 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
680  * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
681  * 4-byte word.
682  */
683 static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
684 					   const u32 *wmi_svc_bm)
685 {
686 	int i, j;
687 
688 	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
689 		do {
690 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
691 				set_bit(j, wmi->wmi_ab->svc_map);
692 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
693 	}
694 }
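/* Worked example for the packing described above, assuming
 * WMI_SERVICE_BITS_IN_SIZE32 == 4 (the "b0-b3" noted in the comment):
 * service id 9 is carried in word 9 / 4 == 2 at bit 9 % 4 == 1, so
 * BIT(1) of wmi_svc_bm[2] maps to bit 9 of svc_map.
 */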
695 
696 static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
697 				    const void *ptr, void *data)
698 {
699 	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
700 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
701 	u16 expect_len;
702 
703 	switch (tag) {
704 	case WMI_TAG_SERVICE_READY_EVENT:
705 		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
706 			return -EINVAL;
707 		break;
708 
709 	case WMI_TAG_ARRAY_UINT32:
710 		if (!svc_ready->wmi_svc_bitmap_done) {
711 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
712 			if (len < expect_len) {
713 				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
714 					    len, tag);
715 				return -EINVAL;
716 			}
717 
718 			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
719 
720 			svc_ready->wmi_svc_bitmap_done = true;
721 		}
722 		break;
723 	default:
724 		break;
725 	}
726 
727 	return 0;
728 }
729 
730 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
731 {
732 	struct ath12k_wmi_svc_ready_parse svc_ready = { };
733 	int ret;
734 
735 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
736 				  ath12k_wmi_svc_rdy_parse,
737 				  &svc_ready);
738 	if (ret) {
739 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
740 		return ret;
741 	}
742 
743 	return 0;
744 }
745 
746 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
747 				    struct ieee80211_tx_info *info)
748 {
749 	struct ath12k_base *ab = ar->ab;
750 	u32 freq = 0;
751 
752 	if (ab->hw_params->single_pdev_only &&
753 	    ar->scan.is_roc &&
754 	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
755 		freq = ar->scan.roc_freq;
756 
757 	return freq;
758 }
759 
760 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
761 {
762 	struct sk_buff *skb;
763 	struct ath12k_base *ab = wmi_ab->ab;
764 	u32 round_len = roundup(len, 4);
765 
766 	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
767 	if (!skb)
768 		return NULL;
769 
770 	skb_reserve(skb, WMI_SKB_HEADROOM);
771 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
772 		ath12k_warn(ab, "unaligned WMI skb data\n");
773 
774 	skb_put(skb, round_len);
775 	memset(skb->data, 0, round_len);
776 
777 	return skb;
778 }
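/* Illustrative layout note: for len == 10 the helper allocates
 * WMI_SKB_HEADROOM + 12 bytes, reserves the headroom for the headers later
 * pushed by ath12k_wmi_cmd_send_nowait(), and returns a zeroed 12-byte
 * payload area (warning if the data pointer ended up unaligned).
 */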
779 
780 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
781 			 struct sk_buff *frame)
782 {
783 	struct ath12k_wmi_pdev *wmi = ar->wmi;
784 	struct wmi_mgmt_send_cmd *cmd;
785 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
786 	struct wmi_tlv *frame_tlv;
787 	struct sk_buff *skb;
788 	u32 buf_len;
789 	int ret, len;
790 
791 	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
792 
793 	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
794 
795 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
796 	if (!skb)
797 		return -ENOMEM;
798 
799 	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
800 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
801 						 sizeof(*cmd));
802 	cmd->vdev_id = cpu_to_le32(vdev_id);
803 	cmd->desc_id = cpu_to_le32(buf_id);
804 	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
805 	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
806 	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
807 	cmd->frame_len = cpu_to_le32(frame->len);
808 	cmd->buf_len = cpu_to_le32(buf_len);
809 	cmd->tx_params_valid = 0;
810 
811 	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
812 	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
813 
814 	memcpy(frame_tlv->value, frame->data, buf_len);
815 
816 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
817 	if (ret) {
818 		ath12k_warn(ar->ab,
819 			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
820 		dev_kfree_skb(skb);
821 	}
822 
823 	return ret;
824 }
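/* Note on the split above: at most WMI_MGMT_SEND_DOWNLD_LEN bytes of the
 * frame are copied inline into the command (buf_len), while frame_len and
 * paddr_lo/hi describe the complete DMA-mapped frame, presumably so the
 * target can fetch any remainder directly.
 */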
825 
826 int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
827 				      u32 vdev_id, u32 pdev_id)
828 {
829 	struct ath12k_wmi_pdev *wmi = ar->wmi;
830 	struct wmi_request_stats_cmd *cmd;
831 	struct sk_buff *skb;
832 	int ret;
833 
834 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
835 	if (!skb)
836 		return -ENOMEM;
837 
838 	cmd = (struct wmi_request_stats_cmd *)skb->data;
839 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
840 						 sizeof(*cmd));
841 
842 	cmd->stats_id = cpu_to_le32(stats_id);
843 	cmd->vdev_id = cpu_to_le32(vdev_id);
844 	cmd->pdev_id = cpu_to_le32(pdev_id);
845 
846 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
847 	if (ret) {
848 		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
849 		dev_kfree_skb(skb);
850 	}
851 
852 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
853 		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
854 		   stats_id, vdev_id, pdev_id);
855 
856 	return ret;
857 }
858 
859 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
860 			   struct ath12k_wmi_vdev_create_arg *args)
861 {
862 	struct ath12k_wmi_pdev *wmi = ar->wmi;
863 	struct wmi_vdev_create_cmd *cmd;
864 	struct sk_buff *skb;
865 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
866 	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
867 	struct wmi_vdev_create_mlo_params *ml_params;
868 	struct wmi_tlv *tlv;
869 	int ret, len;
870 	void *ptr;
871 
872 	/* It can be optimized by sending tx/rx chain configuration
873 	 * only for supported bands instead of always sending it for
874 	 * both bands.
875 	 */
876 	len = sizeof(*cmd) + TLV_HDR_SIZE +
877 		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
878 		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
879 
880 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
881 	if (!skb)
882 		return -ENOMEM;
883 
884 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
885 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
886 						 sizeof(*cmd));
887 
888 	cmd->vdev_id = cpu_to_le32(args->if_id);
889 	cmd->vdev_type = cpu_to_le32(args->type);
890 	cmd->vdev_subtype = cpu_to_le32(args->subtype);
891 	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
892 	cmd->pdev_id = cpu_to_le32(args->pdev_id);
893 	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
894 	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
895 	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
896 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
897 
898 	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
899 		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
900 
901 	ptr = skb->data + sizeof(*cmd);
902 	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
903 
904 	tlv = ptr;
905 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
906 
907 	ptr += TLV_HDR_SIZE;
908 	txrx_streams = ptr;
909 	len = sizeof(*txrx_streams);
910 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
911 							  len);
912 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
913 	txrx_streams->supported_tx_streams =
914 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
915 	txrx_streams->supported_rx_streams =
916 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
917 
918 	txrx_streams++;
919 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
920 							  len);
921 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
922 	txrx_streams->supported_tx_streams =
923 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
924 	txrx_streams->supported_rx_streams =
925 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
926 
927 	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
928 
929 	if (is_ml_vdev) {
930 		tlv = ptr;
931 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
932 						 sizeof(*ml_params));
933 		ptr += TLV_HDR_SIZE;
934 		ml_params = ptr;
935 
936 		ml_params->tlv_header =
937 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
938 					       sizeof(*ml_params));
939 		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
940 	}
941 
942 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
943 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
944 		   args->if_id, args->type, args->subtype,
945 		   macaddr, args->pdev_id);
946 
947 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
948 	if (ret) {
949 		ath12k_warn(ar->ab,
950 			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
951 		dev_kfree_skb(skb);
952 	}
953 
954 	return ret;
955 }
956 
957 int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
958 {
959 	struct ath12k_wmi_pdev *wmi = ar->wmi;
960 	struct wmi_vdev_delete_cmd *cmd;
961 	struct sk_buff *skb;
962 	int ret;
963 
964 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
965 	if (!skb)
966 		return -ENOMEM;
967 
968 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
969 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
970 						 sizeof(*cmd));
971 	cmd->vdev_id = cpu_to_le32(vdev_id);
972 
973 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
974 
975 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
976 	if (ret) {
977 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
978 		dev_kfree_skb(skb);
979 	}
980 
981 	return ret;
982 }
983 
984 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
985 {
986 	struct ath12k_wmi_pdev *wmi = ar->wmi;
987 	struct wmi_vdev_stop_cmd *cmd;
988 	struct sk_buff *skb;
989 	int ret;
990 
991 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
992 	if (!skb)
993 		return -ENOMEM;
994 
995 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
996 
997 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
998 						 sizeof(*cmd));
999 	cmd->vdev_id = cpu_to_le32(vdev_id);
1000 
1001 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
1002 
1003 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
1004 	if (ret) {
1005 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
1006 		dev_kfree_skb(skb);
1007 	}
1008 
1009 	return ret;
1010 }
1011 
1012 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
1013 {
1014 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1015 	struct wmi_vdev_down_cmd *cmd;
1016 	struct sk_buff *skb;
1017 	int ret;
1018 
1019 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1020 	if (!skb)
1021 		return -ENOMEM;
1022 
1023 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
1024 
1025 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
1026 						 sizeof(*cmd));
1027 	cmd->vdev_id = cpu_to_le32(vdev_id);
1028 
1029 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
1030 
1031 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
1032 	if (ret) {
1033 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
1034 		dev_kfree_skb(skb);
1035 	}
1036 
1037 	return ret;
1038 }
1039 
1040 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
1041 				       struct wmi_vdev_start_req_arg *arg)
1042 {
1043 	u32 center_freq1 = arg->band_center_freq1;
1044 
1045 	memset(chan, 0, sizeof(*chan));
1046 
1047 	chan->mhz = cpu_to_le32(arg->freq);
1048 	chan->band_center_freq1 = cpu_to_le32(center_freq1);
1049 	if (arg->mode == MODE_11BE_EHT320) {
1050 		if (arg->freq > center_freq1)
1051 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
1052 		else
1053 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
1054 
1055 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1056 
1057 	} else if (arg->mode == MODE_11BE_EHT160) {
1058 		if (arg->freq > center_freq1)
1059 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
1060 		else
1061 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
1062 
1063 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1064 	} else if (arg->mode == MODE_11BE_EHT80_80) {
1065 		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
1066 	} else {
1067 		chan->band_center_freq2 = 0;
1068 	}
1069 
1070 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
1071 	if (arg->passive)
1072 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
1073 	if (arg->allow_ibss)
1074 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
1075 	if (arg->allow_ht)
1076 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
1077 	if (arg->allow_vht)
1078 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
1079 	if (arg->allow_he)
1080 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
1081 	if (arg->ht40plus)
1082 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
1083 	if (arg->chan_radar)
1084 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
1085 	if (arg->freq2_radar)
1086 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
1087 
1088 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
1089 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
1090 		le32_encode_bits(arg->max_reg_power,
1091 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
1092 
1093 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
1094 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
1095 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
1096 }
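/* Editor's worked example for the MODE_11BE_EHT320 adjustment above
 * (frequencies illustrative): a 320 MHz block spanning 6105-6425 MHz has
 * band_center_freq1 == 6265. For a control channel at freq == 6275
 * (> 6265) the primary 160 MHz segment is the upper half, so
 * band_center_freq1 is rewritten to 6265 + 80 == 6345 and the original
 * 320 MHz center, 6265, is reported in band_center_freq2.
 */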
1097 
1098 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
1099 			  bool restart)
1100 {
1101 	struct wmi_vdev_start_mlo_params *ml_params;
1102 	struct wmi_partner_link_info *partner_info;
1103 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1104 	struct wmi_vdev_start_request_cmd *cmd;
1105 	struct sk_buff *skb;
1106 	struct ath12k_wmi_channel_params *chan;
1107 	struct wmi_tlv *tlv;
1108 	void *ptr;
1109 	int ret, len, i, ml_arg_size = 0;
1110 
1111 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1112 		return -EINVAL;
1113 
1114 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
1115 
1116 	if (!restart && arg->ml.enabled) {
1117 		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
1118 			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
1119 					      sizeof(*partner_info));
1120 		len += ml_arg_size;
1121 	}
1122 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1123 	if (!skb)
1124 		return -ENOMEM;
1125 
1126 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1127 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
1128 						 sizeof(*cmd));
1129 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1130 	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
1131 	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
1132 	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
1133 	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
1134 	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
1135 	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
1136 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
1137 	cmd->regdomain = cpu_to_le32(arg->regdomain);
1138 	cmd->he_ops = cpu_to_le32(arg->he_ops);
1139 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
1140 	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
1141 	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
1142 
1143 	if (!restart) {
1144 		if (arg->ssid) {
1145 			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
1146 			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1147 		}
1148 		if (arg->hidden_ssid)
1149 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
1150 		if (arg->pmf_enabled)
1151 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
1152 	}
1153 
1154 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
1155 
1156 	ptr = skb->data + sizeof(*cmd);
1157 	chan = ptr;
1158 
1159 	ath12k_wmi_put_wmi_channel(chan, arg);
1160 
1161 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
1162 						  sizeof(*chan));
1163 	ptr += sizeof(*chan);
1164 
1165 	tlv = ptr;
1166 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
1167 
1168 	/* Note: This is a nested TLV containing:
1169 	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
1170 	 */
1171 
1172 	ptr += sizeof(*tlv);
1173 
1174 	if (ml_arg_size) {
1175 		tlv = ptr;
1176 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1177 						 sizeof(*ml_params));
1178 		ptr += TLV_HDR_SIZE;
1179 
1180 		ml_params = ptr;
1181 
1182 		ml_params->tlv_header =
1183 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
1184 					       sizeof(*ml_params));
1185 
1186 		ml_params->flags = le32_encode_bits(arg->ml.enabled,
1187 						    ATH12K_WMI_FLAG_MLO_ENABLED) |
1188 				   le32_encode_bits(arg->ml.assoc_link,
1189 						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
1190 				   le32_encode_bits(arg->ml.mcast_link,
1191 						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
1192 				   le32_encode_bits(arg->ml.link_add,
1193 						    ATH12K_WMI_FLAG_MLO_LINK_ADD);
1194 
1195 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
1196 			   arg->vdev_id, ml_params->flags);
1197 
1198 		ptr += sizeof(*ml_params);
1199 
1200 		tlv = ptr;
1201 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1202 						 arg->ml.num_partner_links *
1203 						 sizeof(*partner_info));
1204 		ptr += TLV_HDR_SIZE;
1205 
1206 		partner_info = ptr;
1207 
1208 		for (i = 0; i < arg->ml.num_partner_links; i++) {
1209 			partner_info->tlv_header =
1210 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
1211 						       sizeof(*partner_info));
1212 			partner_info->vdev_id =
1213 				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
1214 			partner_info->hw_link_id =
1215 				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
1216 			ether_addr_copy(partner_info->vdev_addr.addr,
1217 					arg->ml.partner_info[i].addr);
1218 
1219 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
1220 				   partner_info->vdev_id, partner_info->hw_link_id,
1221 				   partner_info->vdev_addr.addr);
1222 
1223 			partner_info++;
1224 		}
1225 
1226 		ptr = partner_info;
1227 	}
1228 
1229 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
1230 		   restart ? "restart" : "start", arg->vdev_id,
1231 		   arg->freq, arg->mode);
1232 
1233 	if (restart)
1234 		ret = ath12k_wmi_cmd_send(wmi, skb,
1235 					  WMI_VDEV_RESTART_REQUEST_CMDID);
1236 	else
1237 		ret = ath12k_wmi_cmd_send(wmi, skb,
1238 					  WMI_VDEV_START_REQUEST_CMDID);
1239 	if (ret) {
1240 		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
1241 			    restart ? "restart" : "start");
1242 		dev_kfree_skb(skb);
1243 	}
1244 
1245 	return ret;
1246 }
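/* Resulting TLV layout of the command built above, for reference:
 *
 *	[wmi_vdev_start_request_cmd]
 *	[ath12k_wmi_channel_params]
 *	[array tlv, len 0]			placeholder for NoA descriptors
 *	[array tlv][wmi_vdev_start_mlo_params]	only when ML is enabled
 *	[array tlv][wmi_partner_link_info x N]	only when ML is enabled
 */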
1247 
1248 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
1249 {
1250 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1251 	struct wmi_vdev_up_cmd *cmd;
1252 	struct sk_buff *skb;
1253 	int ret;
1254 
1255 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1256 	if (!skb)
1257 		return -ENOMEM;
1258 
1259 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1260 
1261 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1262 						 sizeof(*cmd));
1263 	cmd->vdev_id = cpu_to_le32(params->vdev_id);
1264 	cmd->vdev_assoc_id = cpu_to_le32(params->aid);
1265 
1266 	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
1267 
1268 	if (params->tx_bssid) {
1269 		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
1270 		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
1271 		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
1272 	}
1273 
1274 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1275 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1276 		   params->vdev_id, params->aid, params->bssid);
1277 
1278 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1279 	if (ret) {
1280 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1281 		dev_kfree_skb(skb);
1282 	}
1283 
1284 	return ret;
1285 }
1286 
1287 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
1288 				    struct ath12k_wmi_peer_create_arg *arg)
1289 {
1290 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1291 	struct wmi_peer_create_cmd *cmd;
1292 	struct sk_buff *skb;
1293 	int ret, len;
1294 	struct wmi_peer_create_mlo_params *ml_param;
1295 	void *ptr;
1296 	struct wmi_tlv *tlv;
1297 
1298 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
1299 
1300 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1301 	if (!skb)
1302 		return -ENOMEM;
1303 
1304 	cmd = (struct wmi_peer_create_cmd *)skb->data;
1305 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
1306 						 sizeof(*cmd));
1307 
1308 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
1309 	cmd->peer_type = cpu_to_le32(arg->peer_type);
1310 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1311 
1312 	ptr = skb->data + sizeof(*cmd);
1313 	tlv = ptr;
1314 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1315 					 sizeof(*ml_param));
1316 	ptr += TLV_HDR_SIZE;
1317 	ml_param = ptr;
1318 	ml_param->tlv_header =
1319 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
1320 					       sizeof(*ml_param));
1321 	if (arg->ml_enabled)
1322 		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
1323 
1324 	ptr += sizeof(*ml_param);
1325 
1326 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1327 		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
1328 		   arg->vdev_id, arg->peer_addr, ml_param->flags);
1329 
1330 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1331 	if (ret) {
1332 		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1333 		dev_kfree_skb(skb);
1334 	}
1335 
1336 	return ret;
1337 }
1338 
1339 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
1340 				    const u8 *peer_addr, u8 vdev_id)
1341 {
1342 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1343 	struct wmi_peer_delete_cmd *cmd;
1344 	struct sk_buff *skb;
1345 	int ret;
1346 
1347 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1348 	if (!skb)
1349 		return -ENOMEM;
1350 
1351 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
1352 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
1353 						 sizeof(*cmd));
1354 
1355 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1356 	cmd->vdev_id = cpu_to_le32(vdev_id);
1357 
1358 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1359 		   "WMI peer delete vdev_id %d peer_addr %pM\n",
1360 		   vdev_id,  peer_addr);
1361 
1362 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
1363 	if (ret) {
1364 		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
1365 		dev_kfree_skb(skb);
1366 	}
1367 
1368 	return ret;
1369 }
1370 
1371 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1372 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1373 {
1374 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1375 	struct wmi_pdev_set_regdomain_cmd *cmd;
1376 	struct sk_buff *skb;
1377 	int ret;
1378 
1379 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1380 	if (!skb)
1381 		return -ENOMEM;
1382 
1383 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1384 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1385 						 sizeof(*cmd));
1386 
1387 	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1388 	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1389 	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1390 	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1391 	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1392 	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1393 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1394 
1395 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1396 		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1397 		   arg->current_rd_in_use, arg->current_rd_2g,
1398 		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1399 
1400 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1401 	if (ret) {
1402 		ath12k_warn(ar->ab,
1403 			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1404 		dev_kfree_skb(skb);
1405 	}
1406 
1407 	return ret;
1408 }
1409 
1410 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
1411 			      u32 vdev_id, u32 param_id, u32 param_val)
1412 {
1413 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1414 	struct wmi_peer_set_param_cmd *cmd;
1415 	struct sk_buff *skb;
1416 	int ret;
1417 
1418 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1419 	if (!skb)
1420 		return -ENOMEM;
1421 
1422 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1423 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
1424 						 sizeof(*cmd));
1425 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1426 	cmd->vdev_id = cpu_to_le32(vdev_id);
1427 	cmd->param_id = cpu_to_le32(param_id);
1428 	cmd->param_value = cpu_to_le32(param_val);
1429 
1430 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1431 		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
1432 		   vdev_id, peer_addr, param_id, param_val);
1433 
1434 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
1435 	if (ret) {
1436 		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
1437 		dev_kfree_skb(skb);
1438 	}
1439 
1440 	return ret;
1441 }
1442 
1443 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
1444 					u8 peer_addr[ETH_ALEN],
1445 					u32 peer_tid_bitmap,
1446 					u8 vdev_id)
1447 {
1448 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1449 	struct wmi_peer_flush_tids_cmd *cmd;
1450 	struct sk_buff *skb;
1451 	int ret;
1452 
1453 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1454 	if (!skb)
1455 		return -ENOMEM;
1456 
1457 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1458 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
1459 						 sizeof(*cmd));
1460 
1461 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1462 	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
1463 	cmd->vdev_id = cpu_to_le32(vdev_id);
1464 
1465 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1466 		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1467 		   vdev_id, peer_addr, peer_tid_bitmap);
1468 
1469 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1470 	if (ret) {
1471 		ath12k_warn(ar->ab,
1472 			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1473 		dev_kfree_skb(skb);
1474 	}
1475 
1476 	return ret;
1477 }
1478 
1479 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
1480 					   int vdev_id, const u8 *addr,
1481 					   dma_addr_t paddr, u8 tid,
1482 					   u8 ba_window_size_valid,
1483 					   u32 ba_window_size)
1484 {
1485 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
1486 	struct sk_buff *skb;
1487 	int ret;
1488 
1489 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
1490 	if (!skb)
1491 		return -ENOMEM;
1492 
1493 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
1494 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
1495 						 sizeof(*cmd));
1496 
1497 	ether_addr_copy(cmd->peer_macaddr.addr, addr);
1498 	cmd->vdev_id = cpu_to_le32(vdev_id);
1499 	cmd->tid = cpu_to_le32(tid);
1500 	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
1501 	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
1502 	cmd->queue_no = cpu_to_le32(tid);
1503 	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
1504 	cmd->ba_window_size = cpu_to_le32(ba_window_size);
1505 
1506 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1507 		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1508 		   addr, vdev_id, tid);
1509 
1510 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
1511 				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
1512 	if (ret) {
1513 		ath12k_warn(ar->ab,
1514 			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1515 		dev_kfree_skb(skb);
1516 	}
1517 
1518 	return ret;
1519 }
1520 
1521 int
1522 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1523 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1524 {
1525 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1526 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1527 	struct sk_buff *skb;
1528 	int ret;
1529 
1530 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1531 	if (!skb)
1532 		return -ENOMEM;
1533 
1534 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1535 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1536 						 sizeof(*cmd));
1537 
1538 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1539 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1540 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1541 
1542 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1543 		   "%s: peer_macaddr %pM vdev_id %d, tid_map 0x%x\n", __func__,
1544 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1545 
1546 	ret = ath12k_wmi_cmd_send(wmi, skb,
1547 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1548 	if (ret) {
1549 		ath12k_warn(ar->ab,
1550 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1551 		dev_kfree_skb(skb);
1552 	}
1553 
1554 	return ret;
1555 }
1556 
1557 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1558 			      u32 param_value, u8 pdev_id)
1559 {
1560 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1561 	struct wmi_pdev_set_param_cmd *cmd;
1562 	struct sk_buff *skb;
1563 	int ret;
1564 
1565 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1566 	if (!skb)
1567 		return -ENOMEM;
1568 
1569 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1570 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1571 						 sizeof(*cmd));
1572 	cmd->pdev_id = cpu_to_le32(pdev_id);
1573 	cmd->param_id = cpu_to_le32(param_id);
1574 	cmd->param_value = cpu_to_le32(param_value);
1575 
1576 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1577 		   "WMI pdev set param %d pdev id %d value %d\n",
1578 		   param_id, pdev_id, param_value);
1579 
1580 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1581 	if (ret) {
1582 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1583 		dev_kfree_skb(skb);
1584 	}
1585 
1586 	return ret;
1587 }
1588 
1589 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1590 {
1591 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1592 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1593 	struct sk_buff *skb;
1594 	int ret;
1595 
1596 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1597 	if (!skb)
1598 		return -ENOMEM;
1599 
1600 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1601 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1602 						 sizeof(*cmd));
1603 	cmd->vdev_id = cpu_to_le32(vdev_id);
1604 	cmd->sta_ps_mode = cpu_to_le32(enable);
1605 
1606 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1607 		   "WMI vdev set psmode %d vdev id %d\n",
1608 		   enable, vdev_id);
1609 
1610 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1611 	if (ret) {
1612 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1613 		dev_kfree_skb(skb);
1614 	}
1615 
1616 	return ret;
1617 }
1618 
1619 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1620 			    u32 pdev_id)
1621 {
1622 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1623 	struct wmi_pdev_suspend_cmd *cmd;
1624 	struct sk_buff *skb;
1625 	int ret;
1626 
1627 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1628 	if (!skb)
1629 		return -ENOMEM;
1630 
1631 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1632 
1633 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1634 						 sizeof(*cmd));
1635 
1636 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1637 	cmd->pdev_id = cpu_to_le32(pdev_id);
1638 
1639 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1640 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1641 
1642 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1643 	if (ret) {
1644 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1645 		dev_kfree_skb(skb);
1646 	}
1647 
1648 	return ret;
1649 }
1650 
1651 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1652 {
1653 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1654 	struct wmi_pdev_resume_cmd *cmd;
1655 	struct sk_buff *skb;
1656 	int ret;
1657 
1658 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1659 	if (!skb)
1660 		return -ENOMEM;
1661 
1662 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1663 
1664 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1665 						 sizeof(*cmd));
1666 	cmd->pdev_id = cpu_to_le32(pdev_id);
1667 
1668 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1669 		   "WMI pdev resume pdev id %d\n", pdev_id);
1670 
1671 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1672 	if (ret) {
1673 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1674 		dev_kfree_skb(skb);
1675 	}
1676 
1677 	return ret;
1678 }
1679 
1680 /* TODO FW Support for the cmd is not available yet.
1681  * Can be tested once the command and corresponding
1682  * event is implemented in FW
1683  */
1684 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1685 					  enum wmi_bss_chan_info_req_type type)
1686 {
1687 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1688 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1689 	struct sk_buff *skb;
1690 	int ret;
1691 
1692 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1693 	if (!skb)
1694 		return -ENOMEM;
1695 
1696 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1697 
1698 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1699 						 sizeof(*cmd));
1700 	cmd->req_type = cpu_to_le32(type);
1701 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1702 
1703 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1704 		   "WMI bss chan info req type %d\n", type);
1705 
1706 	ret = ath12k_wmi_cmd_send(wmi, skb,
1707 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1708 	if (ret) {
1709 		ath12k_warn(ar->ab,
1710 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1711 		dev_kfree_skb(skb);
1712 	}
1713 
1714 	return ret;
1715 }
1716 
1717 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1718 					struct ath12k_wmi_ap_ps_arg *arg)
1719 {
1720 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1721 	struct wmi_ap_ps_peer_cmd *cmd;
1722 	struct sk_buff *skb;
1723 	int ret;
1724 
1725 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1726 	if (!skb)
1727 		return -ENOMEM;
1728 
1729 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1730 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1731 						 sizeof(*cmd));
1732 
1733 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1734 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1735 	cmd->param = cpu_to_le32(arg->param);
1736 	cmd->value = cpu_to_le32(arg->value);
1737 
1738 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1739 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1740 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1741 
1742 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1743 	if (ret) {
1744 		ath12k_warn(ar->ab,
1745 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1746 		dev_kfree_skb(skb);
1747 	}
1748 
1749 	return ret;
1750 }
1751 
1752 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1753 				u32 param, u32 param_value)
1754 {
1755 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1756 	struct wmi_sta_powersave_param_cmd *cmd;
1757 	struct sk_buff *skb;
1758 	int ret;
1759 
1760 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1761 	if (!skb)
1762 		return -ENOMEM;
1763 
1764 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1765 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1766 						 sizeof(*cmd));
1767 
1768 	cmd->vdev_id = cpu_to_le32(vdev_id);
1769 	cmd->param = cpu_to_le32(param);
1770 	cmd->value = cpu_to_le32(param_value);
1771 
1772 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1773 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1774 		   vdev_id, param, param_value);
1775 
1776 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1777 	if (ret) {
1778 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1779 		dev_kfree_skb(skb);
1780 	}
1781 
1782 	return ret;
1783 }
1784 
1785 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1786 {
1787 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1788 	struct wmi_force_fw_hang_cmd *cmd;
1789 	struct sk_buff *skb;
1790 	int ret, len;
1791 
1792 	len = sizeof(*cmd);
1793 
1794 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1795 	if (!skb)
1796 		return -ENOMEM;
1797 
1798 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1799 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1800 						 len);
1801 
1802 	cmd->type = cpu_to_le32(type);
1803 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1804 
1805 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1806 
1807 	if (ret) {
1808 		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
1809 		dev_kfree_skb(skb);
1810 	}
1811 	return ret;
1812 }
1813 
1814 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1815 				  u32 param_id, u32 param_value)
1816 {
1817 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1818 	struct wmi_vdev_set_param_cmd *cmd;
1819 	struct sk_buff *skb;
1820 	int ret;
1821 
1822 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1823 	if (!skb)
1824 		return -ENOMEM;
1825 
1826 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1827 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1828 						 sizeof(*cmd));
1829 
1830 	cmd->vdev_id = cpu_to_le32(vdev_id);
1831 	cmd->param_id = cpu_to_le32(param_id);
1832 	cmd->param_value = cpu_to_le32(param_value);
1833 
1834 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1835 		   "WMI vdev id 0x%x set param %d value %d\n",
1836 		   vdev_id, param_id, param_value);
1837 
1838 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1839 	if (ret) {
1840 		ath12k_warn(ar->ab,
1841 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1842 		dev_kfree_skb(skb);
1843 	}
1844 
1845 	return ret;
1846 }
1847 
1848 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1849 {
1850 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1851 	struct wmi_get_pdev_temperature_cmd *cmd;
1852 	struct sk_buff *skb;
1853 	int ret;
1854 
1855 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1856 	if (!skb)
1857 		return -ENOMEM;
1858 
1859 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1860 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1861 						 sizeof(*cmd));
1862 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1863 
1864 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1865 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1866 
1867 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1868 	if (ret) {
1869 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1870 		dev_kfree_skb(skb);
1871 	}
1872 
1873 	return ret;
1874 }
1875 
1876 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1877 					    u32 vdev_id, u32 bcn_ctrl_op)
1878 {
1879 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1880 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1881 	struct sk_buff *skb;
1882 	int ret;
1883 
1884 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1885 	if (!skb)
1886 		return -ENOMEM;
1887 
1888 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1889 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1890 						 sizeof(*cmd));
1891 
1892 	cmd->vdev_id = cpu_to_le32(vdev_id);
1893 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1894 
1895 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1896 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1897 		   vdev_id, bcn_ctrl_op);
1898 
1899 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1900 	if (ret) {
1901 		ath12k_warn(ar->ab,
1902 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1903 		dev_kfree_skb(skb);
1904 	}
1905 
1906 	return ret;
1907 }
1908 
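/* Send the P2P IE for a GO vdev to firmware. The IE is carried as a
 * byte-array TLV after the fixed command TLV; its length is taken from
 * the IE's own length octet (p2p_ie[1] plus the two-byte IE header) and
 * the buffer is padded up to a 4-byte boundary for the TLV stream.
 */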
1909 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1910 			     const u8 *p2p_ie)
1911 {
1912 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1913 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1914 	size_t p2p_ie_len, aligned_len;
1915 	struct wmi_tlv *tlv;
1916 	struct sk_buff *skb;
1917 	void *ptr;
1918 	int ret, len;
1919 
1920 	p2p_ie_len = p2p_ie[1] + 2;
1921 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1922 
1923 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1924 
1925 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1926 	if (!skb)
1927 		return -ENOMEM;
1928 
1929 	ptr = skb->data;
1930 	cmd = ptr;
1931 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1932 						 sizeof(*cmd));
1933 	cmd->vdev_id = cpu_to_le32(vdev_id);
1934 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1935 
1936 	ptr += sizeof(*cmd);
1937 	tlv = ptr;
1938 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
1939 					 aligned_len);
1940 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1941 
1942 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1943 	if (ret) {
1944 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1945 		dev_kfree_skb(skb);
1946 	}
1947 
1948 	return ret;
1949 }
1950 
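/* Upload a beacon template to firmware. The TLV stream carries the
 * fixed WMI_TAG_BCN_TMPL_CMD params, a (currently zeroed) beacon/probe
 * info TLV and the beacon frame itself as a byte-array TLV, together
 * with the TIM, CSA countdown and MBSSID IE offsets inside that frame.
 */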
1951 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1952 			struct ieee80211_mutable_offsets *offs,
1953 			struct sk_buff *bcn,
1954 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1955 {
1956 	struct ath12k *ar = arvif->ar;
1957 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1958 	struct ath12k_base *ab = ar->ab;
1959 	struct wmi_bcn_tmpl_cmd *cmd;
1960 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1961 	struct ath12k_vif *ahvif = arvif->ahvif;
1962 	struct ieee80211_bss_conf *conf;
1963 	u32 vdev_id = arvif->vdev_id;
1964 	struct wmi_tlv *tlv;
1965 	struct sk_buff *skb;
1966 	u32 ema_params = 0;
1967 	void *ptr;
1968 	int ret, len;
1969 	size_t aligned_len = roundup(bcn->len, 4);
1970 
1971 	conf = ath12k_mac_get_link_bss_conf(arvif);
1972 	if (!conf) {
1973 		ath12k_warn(ab,
1974 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1975 			    ahvif->vif->addr, arvif->link_id);
1976 		return -EINVAL;
1977 	}
1978 
1979 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1980 
1981 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1982 	if (!skb)
1983 		return -ENOMEM;
1984 
1985 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1986 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1987 						 sizeof(*cmd));
1988 	cmd->vdev_id = cpu_to_le32(vdev_id);
1989 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1990 
1991 	if (conf->csa_active) {
1992 		cmd->csa_switch_count_offset =
1993 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
1994 		cmd->ext_csa_switch_count_offset =
1995 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
1996 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
1997 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
1998 	}
1999 
2000 	cmd->buf_len = cpu_to_le32(bcn->len);
2001 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
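	/* For EMA beacons, pack the total template count and this
	 * template's index into ema_params, marking the first and last
	 * templates in the set via their dedicated bit-fields.
	 */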
2002 	if (ema_args) {
2003 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
2004 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
2005 		if (ema_args->bcn_index == 0)
2006 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
2007 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
2008 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
2009 		cmd->ema_params = cpu_to_le32(ema_params);
2010 	}
2011 
2012 	ptr = skb->data + sizeof(*cmd);
2013 
2014 	bcn_prb_info = ptr;
2015 	len = sizeof(*bcn_prb_info);
2016 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
2017 							  len);
2018 	bcn_prb_info->caps = 0;
2019 	bcn_prb_info->erp = 0;
2020 
2021 	ptr += sizeof(*bcn_prb_info);
2022 
2023 	tlv = ptr;
2024 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2025 	memcpy(tlv->value, bcn->data, bcn->len);
2026 
2027 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2028 	if (ret) {
2029 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2030 		dev_kfree_skb(skb);
2031 	}
2032 
2033 	return ret;
2034 }
2035 
2036 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2037 				struct wmi_vdev_install_key_arg *arg)
2038 {
2039 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2040 	struct wmi_vdev_install_key_cmd *cmd;
2041 	struct wmi_tlv *tlv;
2042 	struct sk_buff *skb;
2043 	int ret, len, key_len_aligned;
2044 
2045 	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
2046 	 * length is specified in cmd->key_len.
2047 	 */
2048 	key_len_aligned = roundup(arg->key_len, 4);
2049 
2050 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2051 
2052 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2053 	if (!skb)
2054 		return -ENOMEM;
2055 
2056 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2057 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2058 						 sizeof(*cmd));
2059 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2060 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2061 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2062 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2063 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2064 	cmd->key_len = cpu_to_le32(arg->key_len);
2065 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2066 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2067 
2068 	if (arg->key_rsc_counter)
2069 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2070 
2071 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2072 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2073 	memcpy(tlv->value, arg->key_data, arg->key_len);
2074 
2075 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2076 		   "WMI vdev install key idx %d cipher %d len %d\n",
2077 		   arg->key_idx, arg->key_cipher, arg->key_len);
2078 
2079 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2080 	if (ret) {
2081 		ath12k_warn(ar->ab,
2082 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2083 		dev_kfree_skb(skb);
2084 	}
2085 
2086 	return ret;
2087 }
2088 
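/* Translate host peer capability info into WMI peer_flags and
 * peer_flags_ext bits. The HT/VHT/HE/EHT, bandwidth, MIMO PS and TWT
 * flags are only set when WME is enabled for the peer (arg->is_wme_set).
 */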
2089 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2090 				       struct ath12k_wmi_peer_assoc_arg *arg,
2091 				       bool hw_crypto_disabled)
2092 {
2093 	cmd->peer_flags = 0;
2094 	cmd->peer_flags_ext = 0;
2095 
2096 	if (arg->is_wme_set) {
2097 		if (arg->qos_flag)
2098 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2099 		if (arg->apsd_flag)
2100 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2101 		if (arg->ht_flag)
2102 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2103 		if (arg->bw_40)
2104 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2105 		if (arg->bw_80)
2106 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2107 		if (arg->bw_160)
2108 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2109 		if (arg->bw_320)
2110 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2111 
2112 		/* Typically if STBC is enabled for VHT it should be enabled
2113 		 * for HT as well
2114 		 **/
2115 		if (arg->stbc_flag)
2116 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2117 
2118 		/* Typically if LDPC is enabled for VHT it should be enabled
2119 		 * for HT as well
2120 		 **/
2121 		if (arg->ldpc_flag)
2122 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2123 
2124 		if (arg->static_mimops_flag)
2125 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2126 		if (arg->dynamic_mimops_flag)
2127 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2128 		if (arg->spatial_mux_flag)
2129 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2130 		if (arg->vht_flag)
2131 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2132 		if (arg->he_flag)
2133 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2134 		if (arg->twt_requester)
2135 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2136 		if (arg->twt_responder)
2137 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2138 		if (arg->eht_flag)
2139 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2140 	}
2141 
2142 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2143 	 * (during re-association).
2144 	 * Authorization will be done for these modes on key installation.
2145 	 */
2146 	if (arg->auth_flag)
2147 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2148 	if (arg->need_ptk_4_way) {
2149 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2150 		if (!hw_crypto_disabled)
2151 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2152 	}
2153 	if (arg->need_gtk_2_way)
2154 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2155 	/* safe mode bypass the 4-way handshake */
2156 	if (arg->safe_mode_enabled)
2157 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2158 						 WMI_PEER_NEED_GTK_2_WAY));
2159 
2160 	if (arg->is_pmf_enabled)
2161 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2162 
2163 	/* Disable AMSDU for station transmit, if user configures it */
2164 	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
2165 	 * it
2166 	 * if (arg->amsdu_disable) Add after FW support
2167 	 **/
2168 
2169 	/* Target asserts if node is marked HT and all MCS is set to 0.
2170 	 * Mark the node as non-HT if all the mcs rates are disabled through
2171 	 * iwpriv
2172 	 **/
2173 	if (arg->peer_ht_rates.num_rates == 0)
2174 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2175 }
2176 
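/* Build and send WMI_PEER_ASSOC_CMDID. The TLV stream is, in order:
 * fixed assoc params, legacy rates (byte array), HT rates (byte array),
 * VHT rate set, HE rate set array, MLO peer params, EHT rate set array
 * and MLO partner link array; the MLO TLVs are zero length when MLO is
 * disabled.
 */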
2177 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2178 				   struct ath12k_wmi_peer_assoc_arg *arg)
2179 {
2180 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2181 	struct wmi_peer_assoc_complete_cmd *cmd;
2182 	struct ath12k_wmi_vht_rate_set_params *mcs;
2183 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2184 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2185 	struct wmi_peer_assoc_mlo_params *ml_params;
2186 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2187 	struct sk_buff *skb;
2188 	struct wmi_tlv *tlv;
2189 	void *ptr;
2190 	u32 peer_legacy_rates_align;
2191 	u32 peer_ht_rates_align;
2192 	int i, ret, len;
2193 	__le32 v;
2194 
2195 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2196 					  sizeof(u32));
2197 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2198 				      sizeof(u32));
2199 
2200 	len = sizeof(*cmd) +
2201 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2202 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2203 	      sizeof(*mcs) + TLV_HDR_SIZE +
2204 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2205 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2206 
2207 	if (arg->ml.enabled)
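	/* The two MLO TLV headers are always present; when MLO is
	 * disabled they are emitted with zero payload length so the
	 * TLV stream stays well formed.
	 */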
2208 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2209 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2210 	else
2211 		len += (2 * TLV_HDR_SIZE);
2212 
2213 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2214 	if (!skb)
2215 		return -ENOMEM;
2216 
2217 	ptr = skb->data;
2218 
2219 	cmd = ptr;
2220 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2221 						 sizeof(*cmd));
2222 
2223 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2224 
2225 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2226 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2227 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2228 
2229 	ath12k_wmi_copy_peer_flags(cmd, arg,
2230 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2231 					    &ar->ab->dev_flags));
2232 
2233 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2234 
2235 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2236 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2237 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2238 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2239 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2240 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2241 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2242 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2243 
2244 	/* Update 11ax capabilities */
2245 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2246 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2247 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2248 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2249 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2250 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2251 		cmd->peer_he_cap_phy[i] =
2252 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2253 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2254 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2255 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2256 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2257 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2258 
2259 	/* Update 11be capabilities */
2260 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2261 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2262 		       0);
2263 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2264 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2265 		       0);
2266 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2267 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2268 
2269 	/* Update peer legacy rate information */
2270 	ptr += sizeof(*cmd);
2271 
2272 	tlv = ptr;
2273 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2274 
2275 	ptr += TLV_HDR_SIZE;
2276 
2277 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2278 	memcpy(ptr, arg->peer_legacy_rates.rates,
2279 	       arg->peer_legacy_rates.num_rates);
2280 
2281 	/* Update peer HT rate information */
2282 	ptr += peer_legacy_rates_align;
2283 
2284 	tlv = ptr;
2285 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2286 	ptr += TLV_HDR_SIZE;
2287 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2288 	memcpy(ptr, arg->peer_ht_rates.rates,
2289 	       arg->peer_ht_rates.num_rates);
2290 
2291 	/* VHT Rates */
2292 	ptr += peer_ht_rates_align;
2293 
2294 	mcs = ptr;
2295 
2296 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2297 						 sizeof(*mcs));
2298 
2299 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2300 
2301 	/* Update bandwidth-NSS mapping */
2302 	cmd->peer_bw_rxnss_override = 0;
2303 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2304 
2305 	if (arg->vht_capable) {
2306 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2307 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2308 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2309 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2310 	}
2311 
2312 	/* HE Rates */
2313 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2314 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2315 
2316 	ptr += sizeof(*mcs);
2317 
2318 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2319 
2320 	tlv = ptr;
2321 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2322 	ptr += TLV_HDR_SIZE;
2323 
2324 	/* Loop through the HE rate set */
2325 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2326 		he_mcs = ptr;
2327 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2328 							    sizeof(*he_mcs));
2329 
2330 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2331 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2332 		ptr += sizeof(*he_mcs);
2333 	}
2334 
2335 	tlv = ptr;
2336 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2337 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2338 	ptr += TLV_HDR_SIZE;
2339 	if (!len)
2340 		goto skip_ml_params;
2341 
2342 	ml_params = ptr;
2343 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2344 						       len);
2345 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2346 
2347 	if (arg->ml.assoc_link)
2348 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2349 
2350 	if (arg->ml.primary_umac)
2351 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2352 
2353 	if (arg->ml.logical_link_idx_valid)
2354 		ml_params->flags |=
2355 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2356 
2357 	if (arg->ml.peer_id_valid)
2358 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2359 
2360 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2361 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2362 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2363 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2364 	ptr += sizeof(*ml_params);
2365 
2366 skip_ml_params:
2367 	/* Loop through the EHT rate set */
2368 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2369 	tlv = ptr;
2370 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2371 	ptr += TLV_HDR_SIZE;
2372 
2373 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2374 		eht_mcs = ptr;
2375 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2376 							     sizeof(*eht_mcs));
2377 
2378 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2379 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2380 		ptr += sizeof(*eht_mcs);
2381 	}
2382 
2383 	tlv = ptr;
2384 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2385 	/* fill ML Partner links */
2386 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2387 	ptr += TLV_HDR_SIZE;
2388 
2389 	if (len == 0)
2390 		goto send;
2391 
2392 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2393 		u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2394 
2395 		partner_info = ptr;
2396 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
2397 								  sizeof(*partner_info));
2398 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2399 		partner_info->hw_link_id =
2400 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2401 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2402 
2403 		if (arg->ml.partner_info[i].assoc_link)
2404 			partner_info->flags |=
2405 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2406 
2407 		if (arg->ml.partner_info[i].primary_umac)
2408 			partner_info->flags |=
2409 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2410 
2411 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2412 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2413 			partner_info->flags |= v;
2414 		}
2415 
2416 		partner_info->logical_link_idx =
2417 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2418 		ptr += sizeof(*partner_info);
2419 	}
2420 
2421 send:
2422 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2423 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2424 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2425 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2426 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2427 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2428 		   cmd->peer_mpdu_density,
2429 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2430 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2431 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2432 		   cmd->peer_he_cap_phy[2],
2433 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2434 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2435 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2436 		   cmd->peer_eht_cap_phy[2]);
2437 
2438 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2439 	if (ret) {
2440 		ath12k_warn(ar->ab,
2441 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2442 		dev_kfree_skb(skb);
2443 	}
2444 
2445 	return ret;
2446 }
2447 
2448 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2449 				struct ath12k_wmi_scan_req_arg *arg)
2450 {
2451 	/* setup commonly used values */
2452 	arg->scan_req_id = 1;
2453 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2454 	arg->dwell_time_active = 50;
2455 	arg->dwell_time_active_2g = 0;
2456 	arg->dwell_time_passive = 150;
2457 	arg->dwell_time_active_6g = 70;
2458 	arg->dwell_time_passive_6g = 70;
2459 	arg->min_rest_time = 50;
2460 	arg->max_rest_time = 500;
2461 	arg->repeat_probe_time = 0;
2462 	arg->probe_spacing_time = 0;
2463 	arg->idle_time = 0;
2464 	arg->max_scan_time = 20000;
2465 	arg->probe_delay = 5;
2466 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2467 				  WMI_SCAN_EVENT_COMPLETED |
2468 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2469 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2470 				  WMI_SCAN_EVENT_DEQUEUED;
2471 	arg->scan_f_chan_stat_evnt = 1;
2472 	arg->num_bssid = 1;
2473 
2474 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
2475 	 * ZEROs in probe request
2476 	 */
2477 	eth_broadcast_addr(arg->bssid_list[0].addr);
2478 }
2479 
2480 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2481 						   struct ath12k_wmi_scan_req_arg *arg)
2482 {
2483 	/* Scan events subscription */
2484 	if (arg->scan_ev_started)
2485 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2486 	if (arg->scan_ev_completed)
2487 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2488 	if (arg->scan_ev_bss_chan)
2489 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2490 	if (arg->scan_ev_foreign_chan)
2491 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2492 	if (arg->scan_ev_dequeued)
2493 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2494 	if (arg->scan_ev_preempted)
2495 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2496 	if (arg->scan_ev_start_failed)
2497 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2498 	if (arg->scan_ev_restarted)
2499 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2500 	if (arg->scan_ev_foreign_chn_exit)
2501 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2502 	if (arg->scan_ev_suspended)
2503 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2504 	if (arg->scan_ev_resumed)
2505 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2506 
2507 	/** Set scan control flags */
2508 	cmd->scan_ctrl_flags = 0;
2509 	if (arg->scan_f_passive)
2510 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2511 	if (arg->scan_f_strict_passive_pch)
2512 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2513 	if (arg->scan_f_promisc_mode)
2514 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2515 	if (arg->scan_f_capture_phy_err)
2516 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2517 	if (arg->scan_f_half_rate)
2518 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2519 	if (arg->scan_f_quarter_rate)
2520 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2521 	if (arg->scan_f_cck_rates)
2522 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2523 	if (arg->scan_f_ofdm_rates)
2524 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2525 	if (arg->scan_f_chan_stat_evnt)
2526 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2527 	if (arg->scan_f_filter_prb_req)
2528 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2529 	if (arg->scan_f_bcast_probe)
2530 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2531 	if (arg->scan_f_offchan_mgmt_tx)
2532 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2533 	if (arg->scan_f_offchan_data_tx)
2534 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2535 	if (arg->scan_f_force_active_dfs_chn)
2536 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2537 	if (arg->scan_f_add_tpc_ie_in_probe)
2538 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2539 	if (arg->scan_f_add_ds_ie_in_probe)
2540 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2541 	if (arg->scan_f_add_spoofed_mac_in_probe)
2542 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2543 	if (arg->scan_f_add_rand_seq_in_probe)
2544 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2545 	if (arg->scan_f_en_ie_whitelist_in_probe)
2546 		cmd->scan_ctrl_flags |=
2547 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2548 
2549 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2550 						 WMI_SCAN_DWELL_MODE_MASK);
2551 }
2552 
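/* Build and send WMI_START_SCAN_CMDID. All variable-length sections
 * (channel list, SSIDs, BSSIDs, extra IEs and the optional short-SSID
 * and BSSID hints) are sized up front so a single skb can be allocated
 * for the whole TLV stream.
 */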
2553 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2554 				   struct ath12k_wmi_scan_req_arg *arg)
2555 {
2556 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2557 	struct wmi_start_scan_cmd *cmd;
2558 	struct ath12k_wmi_ssid_params *ssid = NULL;
2559 	struct ath12k_wmi_mac_addr_params *bssid;
2560 	struct sk_buff *skb;
2561 	struct wmi_tlv *tlv;
2562 	void *ptr;
2563 	int i, ret, len;
2564 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2565 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2566 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2567 
2568 	len = sizeof(*cmd);
2569 
2570 	len += TLV_HDR_SIZE;
2571 	if (arg->num_chan)
2572 		len += arg->num_chan * sizeof(u32);
2573 
2574 	len += TLV_HDR_SIZE;
2575 	if (arg->num_ssids)
2576 		len += arg->num_ssids * sizeof(*ssid);
2577 
2578 	len += TLV_HDR_SIZE;
2579 	if (arg->num_bssid)
2580 		len += sizeof(*bssid) * arg->num_bssid;
2581 
2582 	if (arg->num_hint_bssid)
2583 		len += TLV_HDR_SIZE +
2584 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2585 
2586 	if (arg->num_hint_s_ssid)
2587 		len += TLV_HDR_SIZE +
2588 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2589 
2590 	len += TLV_HDR_SIZE;
2591 	if (arg->extraie.len)
2592 		extraie_len_with_pad =
2593 			roundup(arg->extraie.len, sizeof(u32));
2594 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2595 		len += extraie_len_with_pad;
2596 	} else {
2597 		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2598 			    arg->extraie.len);
2599 		extraie_len_with_pad = 0;
2600 	}
2601 
2602 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2603 	if (!skb)
2604 		return -ENOMEM;
2605 
2606 	ptr = skb->data;
2607 
2608 	cmd = ptr;
2609 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2610 						 sizeof(*cmd));
2611 
2612 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2613 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2614 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2615 	if (ar->state_11d == ATH12K_11D_PREPARING)
2616 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2617 	else
2618 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2619 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2620 
2621 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2622 
2623 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2624 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2625 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2626 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2627 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2628 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2629 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2630 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2631 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2632 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2633 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2634 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2635 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2636 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2637 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2638 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2639 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2640 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2641 
2642 	ptr += sizeof(*cmd);
2643 
2644 	len = arg->num_chan * sizeof(u32);
2645 
2646 	tlv = ptr;
2647 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2648 	ptr += TLV_HDR_SIZE;
2649 	tmp_ptr = (u32 *)ptr;
2650 
2651 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
2652 
2653 	ptr += len;
2654 
2655 	len = arg->num_ssids * sizeof(*ssid);
2656 	tlv = ptr;
2657 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2658 
2659 	ptr += TLV_HDR_SIZE;
2660 
2661 	if (arg->num_ssids) {
2662 		ssid = ptr;
2663 		for (i = 0; i < arg->num_ssids; ++i) {
2664 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2665 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2666 			       arg->ssid[i].ssid_len);
2667 			ssid++;
2668 		}
2669 	}
2670 
2671 	ptr += (arg->num_ssids * sizeof(*ssid));
2672 	len = arg->num_bssid * sizeof(*bssid);
2673 	tlv = ptr;
2674 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2675 
2676 	ptr += TLV_HDR_SIZE;
2677 	bssid = ptr;
2678 
2679 	if (arg->num_bssid) {
2680 		for (i = 0; i < arg->num_bssid; ++i) {
2681 			ether_addr_copy(bssid->addr,
2682 					arg->bssid_list[i].addr);
2683 			bssid++;
2684 		}
2685 	}
2686 
2687 	ptr += arg->num_bssid * sizeof(*bssid);
2688 
2689 	len = extraie_len_with_pad;
2690 	tlv = ptr;
2691 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2692 	ptr += TLV_HDR_SIZE;
2693 
2694 	if (extraie_len_with_pad)
2695 		memcpy(ptr, arg->extraie.ptr,
2696 		       arg->extraie.len);
2697 
2698 	ptr += extraie_len_with_pad;
2699 
2700 	if (arg->num_hint_s_ssid) {
2701 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2702 		tlv = ptr;
2703 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2704 		ptr += TLV_HDR_SIZE;
2705 		s_ssid = ptr;
2706 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2707 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2708 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2709 			s_ssid++;
2710 		}
2711 		ptr += len;
2712 	}
2713 
2714 	if (arg->num_hint_bssid) {
2715 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2716 		tlv = ptr;
2717 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2718 		ptr += TLV_HDR_SIZE;
2719 		hint_bssid = ptr;
2720 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2721 			hint_bssid->freq_flags =
2722 				arg->hint_bssid[i].freq_flags;
2723 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2724 					&arg->hint_bssid[i].bssid.addr[0]);
2725 			hint_bssid++;
2726 		}
2727 	}
2728 
2729 	ret = ath12k_wmi_cmd_send(wmi, skb,
2730 				  WMI_START_SCAN_CMDID);
2731 	if (ret) {
2732 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2733 		dev_kfree_skb(skb);
2734 	}
2735 
2736 	return ret;
2737 }
2738 
2739 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2740 				  struct ath12k_wmi_scan_cancel_arg *arg)
2741 {
2742 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2743 	struct wmi_stop_scan_cmd *cmd;
2744 	struct sk_buff *skb;
2745 	int ret;
2746 
2747 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2748 	if (!skb)
2749 		return -ENOMEM;
2750 
2751 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2752 
2753 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2754 						 sizeof(*cmd));
2755 
2756 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2757 	cmd->requestor = cpu_to_le32(arg->requester);
2758 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2759 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2760 	/* stop the scan with the corresponding scan_id */
2761 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2762 		/* Cancelling all scans */
2763 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2764 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2765 		/* Cancelling VAP scans */
2766 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2767 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2768 		/* Cancelling specific scan */
2769 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2770 	} else {
2771 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
2772 			    arg->req_type);
2773 		dev_kfree_skb(skb);
2774 		return -EINVAL;
2775 	}
2776 
2777 	ret = ath12k_wmi_cmd_send(wmi, skb,
2778 				  WMI_STOP_SCAN_CMDID);
2779 	if (ret) {
2780 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2781 		dev_kfree_skb(skb);
2782 	}
2783 
2784 	return ret;
2785 }
2786 
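/* The channel list can exceed the maximum WMI message size, so it is
 * split across as many WMI_SCAN_CHAN_LIST commands as needed; every
 * chunk after the first sets WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so
 * firmware appends to, rather than replaces, the list sent so far.
 */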
2787 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2788 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2789 {
2790 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2791 	struct wmi_scan_chan_list_cmd *cmd;
2792 	struct sk_buff *skb;
2793 	struct ath12k_wmi_channel_params *chan_info;
2794 	struct ath12k_wmi_channel_arg *channel_arg;
2795 	struct wmi_tlv *tlv;
2796 	void *ptr;
2797 	int i, ret, len;
2798 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2799 	__le32 *reg1, *reg2;
2800 
2801 	channel_arg = &arg->channel[0];
2802 	while (arg->nallchans) {
2803 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2804 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2805 			sizeof(*chan_info);
2806 
2807 		num_send_chans = min(arg->nallchans, max_chan_limit);
2808 
2809 		arg->nallchans -= num_send_chans;
2810 		len += sizeof(*chan_info) * num_send_chans;
2811 
2812 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2813 		if (!skb)
2814 			return -ENOMEM;
2815 
2816 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2817 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2818 							 sizeof(*cmd));
2819 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2820 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2821 		if (num_sends)
2822 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2823 
2824 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2825 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2826 			   num_send_chans, len, cmd->pdev_id, num_sends);
2827 
2828 		ptr = skb->data + sizeof(*cmd);
2829 
2830 		len = sizeof(*chan_info) * num_send_chans;
2831 		tlv = ptr;
2832 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
2833 						 len);
2834 		ptr += TLV_HDR_SIZE;
2835 
2836 		for (i = 0; i < num_send_chans; ++i) {
2837 			chan_info = ptr;
2838 			memset(chan_info, 0, sizeof(*chan_info));
2839 			len = sizeof(*chan_info);
2840 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2841 								       len);
2842 
2843 			reg1 = &chan_info->reg_info_1;
2844 			reg2 = &chan_info->reg_info_2;
2845 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2846 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2847 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2848 
2849 			if (channel_arg->is_chan_passive)
2850 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2851 			if (channel_arg->allow_he)
2852 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2853 			else if (channel_arg->allow_vht)
2854 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2855 			else if (channel_arg->allow_ht)
2856 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2857 			if (channel_arg->half_rate)
2858 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2859 			if (channel_arg->quarter_rate)
2860 				chan_info->info |=
2861 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2862 
2863 			if (channel_arg->psc_channel)
2864 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2865 
2866 			if (channel_arg->dfs_set)
2867 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2868 
2869 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2870 							    WMI_CHAN_INFO_MODE);
2871 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2872 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2873 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2874 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2875 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2876 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2877 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2878 						  WMI_CHAN_REG_INFO1_REG_CLS);
2879 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2880 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2881 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2882 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2883 
2884 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2885 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2886 				   i, chan_info->mhz, chan_info->info);
2887 
2888 			ptr += sizeof(*chan_info);
2889 
2890 			channel_arg++;
2891 		}
2892 
2893 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2894 		if (ret) {
2895 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2896 			dev_kfree_skb(skb);
2897 			return ret;
2898 		}
2899 
2900 		num_sends++;
2901 	}
2902 
2903 	return 0;
2904 }
2905 
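/* Program per-AC WMM parameters for a vdev. A single command carries
 * one wmi_wmm_params TLV for each access category (BE, BK, VI, VO).
 */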
2906 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2907 				   struct wmi_wmm_params_all_arg *param)
2908 {
2909 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2910 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2911 	struct wmi_wmm_params *wmm_param;
2912 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2913 	struct sk_buff *skb;
2914 	int ret, ac;
2915 
2916 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2917 	if (!skb)
2918 		return -ENOMEM;
2919 
2920 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2921 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2922 						 sizeof(*cmd));
2923 
2924 	cmd->vdev_id = cpu_to_le32(vdev_id);
2925 	cmd->wmm_param_type = 0;
2926 
2927 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2928 		switch (ac) {
2929 		case WME_AC_BE:
2930 			wmi_wmm_arg = &param->ac_be;
2931 			break;
2932 		case WME_AC_BK:
2933 			wmi_wmm_arg = &param->ac_bk;
2934 			break;
2935 		case WME_AC_VI:
2936 			wmi_wmm_arg = &param->ac_vi;
2937 			break;
2938 		case WME_AC_VO:
2939 			wmi_wmm_arg = &param->ac_vo;
2940 			break;
2941 		}
2942 
2943 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2944 		wmm_param->tlv_header =
2945 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2946 					       sizeof(*wmm_param));
2947 
2948 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2949 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2950 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2951 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2952 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2953 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2954 
2955 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2956 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2957 			   ac, wmm_param->aifs, wmm_param->cwmin,
2958 			   wmm_param->cwmax, wmm_param->txoplimit,
2959 			   wmm_param->acm, wmm_param->no_ack);
2960 	}
2961 	ret = ath12k_wmi_cmd_send(wmi, skb,
2962 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2963 	if (ret) {
2964 		ath12k_warn(ar->ab,
2965 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
2966 		dev_kfree_skb(skb);
2967 	}
2968 
2969 	return ret;
2970 }
2971 
2972 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2973 						  u32 pdev_id)
2974 {
2975 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2976 	struct wmi_dfs_phyerr_offload_cmd *cmd;
2977 	struct sk_buff *skb;
2978 	int ret;
2979 
2980 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2981 	if (!skb)
2982 		return -ENOMEM;
2983 
2984 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2985 	cmd->tlv_header =
2986 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2987 				       sizeof(*cmd));
2988 
2989 	cmd->pdev_id = cpu_to_le32(pdev_id);
2990 
2991 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2992 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2993 
2994 	ret = ath12k_wmi_cmd_send(wmi, skb,
2995 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2996 	if (ret) {
2997 		ath12k_warn(ar->ab,
2998 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2999 		dev_kfree_skb(skb);
3000 	}
3001 
3002 	return ret;
3003 }
3004 
3005 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
3006 			    const u8 *buf, size_t buf_len)
3007 {
3008 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3009 	struct wmi_pdev_set_bios_interface_cmd *cmd;
3010 	struct wmi_tlv *tlv;
3011 	struct sk_buff *skb;
3012 	u8 *ptr;
3013 	u32 len, len_aligned;
3014 	int ret;
3015 
3016 	len_aligned = roundup(buf_len, sizeof(u32));
3017 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
3018 
3019 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3020 	if (!skb)
3021 		return -ENOMEM;
3022 
3023 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3024 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3025 						 sizeof(*cmd));
3026 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3027 	cmd->param_type_id = cpu_to_le32(param_id);
3028 	cmd->length = cpu_to_le32(buf_len);
3029 
3030 	ptr = skb->data + sizeof(*cmd);
3031 	tlv = (struct wmi_tlv *)ptr;
3032 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3033 	ptr += TLV_HDR_SIZE;
3034 	memcpy(ptr, buf, buf_len);
3035 
3036 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3037 				  skb,
3038 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3039 	if (ret) {
3040 		ath12k_warn(ab,
3041 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3042 			    param_id, ret);
3043 		dev_kfree_skb(skb);
3044 	}
3045 
3046 	return ret;
3047 }
3048 
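/* Push the ACPI BIOS SAR power-limit data to firmware: the SAR table
 * and the DBS backoff table are sent as two consecutive byte-array
 * TLVs, each padded to a 4-byte boundary.
 */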
3049 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3050 {
3051 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3052 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3053 	struct wmi_tlv *tlv;
3054 	struct sk_buff *skb;
3055 	int ret;
3056 	u8 *buf_ptr;
3057 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3058 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3059 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3060 
3061 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3062 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3063 					      sizeof(u32));
3064 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3065 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3066 
3067 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3068 	if (!skb)
3069 		return -ENOMEM;
3070 
3071 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3072 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3073 						 sizeof(*cmd));
3074 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3075 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3076 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3077 
3078 	buf_ptr = skb->data + sizeof(*cmd);
3079 	tlv = (struct wmi_tlv *)buf_ptr;
3080 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3081 					 sar_table_len_aligned);
3082 	buf_ptr += TLV_HDR_SIZE;
3083 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3084 
3085 	buf_ptr += sar_table_len_aligned;
3086 	tlv = (struct wmi_tlv *)buf_ptr;
3087 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3088 					 sar_dbs_backoff_len_aligned);
3089 	buf_ptr += TLV_HDR_SIZE;
3090 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3091 
3092 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3093 				  skb,
3094 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3095 	if (ret) {
3096 		ath12k_warn(ab,
3097 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3098 			    ret);
3099 		dev_kfree_skb(skb);
3100 	}
3101 
3102 	return ret;
3103 }
3104 
3105 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3106 {
3107 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3108 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3109 	struct wmi_tlv *tlv;
3110 	struct sk_buff *skb;
3111 	int ret;
3112 	u8 *buf_ptr;
3113 	u32 len, sar_geo_len_aligned;
3114 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3115 
3116 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3117 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3118 
3119 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3120 	if (!skb)
3121 		return -ENOMEM;
3122 
3123 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3124 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3125 						 sizeof(*cmd));
3126 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3127 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3128 
3129 	buf_ptr = skb->data + sizeof(*cmd);
3130 	tlv = (struct wmi_tlv *)buf_ptr;
3131 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3132 	buf_ptr += TLV_HDR_SIZE;
3133 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3134 
3135 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3136 				  skb,
3137 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3138 	if (ret) {
3139 		ath12k_warn(ab,
3140 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3141 			    ret);
3142 		dev_kfree_skb(skb);
3143 	}
3144 
3145 	return ret;
3146 }
3147 
3148 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3149 			  u32 tid, u32 initiator, u32 reason)
3150 {
3151 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3152 	struct wmi_delba_send_cmd *cmd;
3153 	struct sk_buff *skb;
3154 	int ret;
3155 
3156 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3157 	if (!skb)
3158 		return -ENOMEM;
3159 
3160 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3161 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3162 						 sizeof(*cmd));
3163 	cmd->vdev_id = cpu_to_le32(vdev_id);
3164 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3165 	cmd->tid = cpu_to_le32(tid);
3166 	cmd->initiator = cpu_to_le32(initiator);
3167 	cmd->reasoncode = cpu_to_le32(reason);
3168 
3169 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3170 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3171 		   vdev_id, mac, tid, initiator, reason);
3172 
3173 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3174 
3175 	if (ret) {
3176 		ath12k_warn(ar->ab,
3177 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3178 		dev_kfree_skb(skb);
3179 	}
3180 
3181 	return ret;
3182 }
3183 
3184 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3185 			      u32 tid, u32 status)
3186 {
3187 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3188 	struct wmi_addba_setresponse_cmd *cmd;
3189 	struct sk_buff *skb;
3190 	int ret;
3191 
3192 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3193 	if (!skb)
3194 		return -ENOMEM;
3195 
3196 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3197 	cmd->tlv_header =
3198 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3199 				       sizeof(*cmd));
3200 	cmd->vdev_id = cpu_to_le32(vdev_id);
3201 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3202 	cmd->tid = cpu_to_le32(tid);
3203 	cmd->statuscode = cpu_to_le32(status);
3204 
3205 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3206 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3207 		   vdev_id, mac, tid, status);
3208 
3209 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3210 
3211 	if (ret) {
3212 		ath12k_warn(ar->ab,
3213 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3214 		dev_kfree_skb(skb);
3215 	}
3216 
3217 	return ret;
3218 }
3219 
3220 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3221 			  u32 tid, u32 buf_size)
3222 {
3223 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3224 	struct wmi_addba_send_cmd *cmd;
3225 	struct sk_buff *skb;
3226 	int ret;
3227 
3228 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3229 	if (!skb)
3230 		return -ENOMEM;
3231 
3232 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3233 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3234 						 sizeof(*cmd));
3235 	cmd->vdev_id = cpu_to_le32(vdev_id);
3236 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3237 	cmd->tid = cpu_to_le32(tid);
3238 	cmd->buffersize = cpu_to_le32(buf_size);
3239 
3240 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3241 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3242 		   vdev_id, mac, tid, buf_size);
3243 
3244 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3245 
3246 	if (ret) {
3247 		ath12k_warn(ar->ab,
3248 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3249 		dev_kfree_skb(skb);
3250 	}
3251 
3252 	return ret;
3253 }
3254 
3255 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3256 {
3257 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3258 	struct wmi_addba_clear_resp_cmd *cmd;
3259 	struct sk_buff *skb;
3260 	int ret;
3261 
3262 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3263 	if (!skb)
3264 		return -ENOMEM;
3265 
3266 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3267 	cmd->tlv_header =
3268 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3269 				       sizeof(*cmd));
3270 	cmd->vdev_id = cpu_to_le32(vdev_id);
3271 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3272 
3273 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3274 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3275 		   vdev_id, mac);
3276 
3277 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3278 
3279 	if (ret) {
3280 		ath12k_warn(ar->ab,
3281 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3282 		dev_kfree_skb(skb);
3283 	}
3284 
3285 	return ret;
3286 }
3287 
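/* Program the initial regulatory country. arg->flags selects exactly one
 * representation: an ISO alpha2 string, a numeric country code or a
 * regdomain id.
 */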
3288 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3289 				     struct ath12k_wmi_init_country_arg *arg)
3290 {
3291 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3292 	struct wmi_init_country_cmd *cmd;
3293 	struct sk_buff *skb;
3294 	int ret;
3295 
3296 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3297 	if (!skb)
3298 		return -ENOMEM;
3299 
3300 	cmd = (struct wmi_init_country_cmd *)skb->data;
3301 	cmd->tlv_header =
3302 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3303 				       sizeof(*cmd));
3304 
3305 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3306 
3307 	switch (arg->flags) {
3308 	case ALPHA_IS_SET:
3309 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3310 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3311 		break;
3312 	case CC_IS_SET:
3313 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3314 		cmd->cc_info.country_code =
3315 			cpu_to_le32(arg->cc_info.country_code);
3316 		break;
3317 	case REGDMN_IS_SET:
3318 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3319 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3320 		break;
3321 	default:
3322 		ret = -EINVAL;
3323 		goto out;
3324 	}
3325 
3326 	ret = ath12k_wmi_cmd_send(wmi, skb,
3327 				  WMI_SET_INIT_COUNTRY_CMDID);
3328 
3329 out:
3330 	if (ret) {
3331 		ath12k_warn(ar->ab,
3332 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3333 			    ret);
3334 		dev_kfree_skb(skb);
3335 	}
3336 
3337 	return ret;
3338 }
3339 
3340 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3341 					    struct wmi_set_current_country_arg *arg)
3342 {
3343 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3344 	struct wmi_set_current_country_cmd *cmd;
3345 	struct sk_buff *skb;
3346 	int ret;
3347 
3348 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3349 	if (!skb)
3350 		return -ENOMEM;
3351 
3352 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3353 	cmd->tlv_header =
3354 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3355 				       sizeof(*cmd));
3356 
3357 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3358 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3359 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3360 
3361 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3362 		   "set current country pdev id %d alpha2 %c%c\n",
3363 		   ar->pdev->pdev_id,
3364 		   arg->alpha2[0],
3365 		   arg->alpha2[1]);
3366 
3367 	if (ret) {
3368 		ath12k_warn(ar->ab,
3369 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3370 		dev_kfree_skb(skb);
3371 	}
3372 
3373 	return ret;
3374 }
3375 
3376 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3377 				       struct wmi_11d_scan_start_arg *arg)
3378 {
3379 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3380 	struct wmi_11d_scan_start_cmd *cmd;
3381 	struct sk_buff *skb;
3382 	int ret;
3383 
3384 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3385 	if (!skb)
3386 		return -ENOMEM;
3387 
3388 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3389 	cmd->tlv_header =
3390 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3391 				       sizeof(*cmd));
3392 
3393 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3394 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3395 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3396 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3397 
3398 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3399 		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
3400 		   arg->vdev_id, arg->scan_period_msec,
3401 		   arg->start_interval_msec);
3402 
3403 	if (ret) {
3404 		ath12k_warn(ar->ab,
3405 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3406 		dev_kfree_skb(skb);
3407 	}
3408 
3409 	return ret;
3410 }
3411 
3412 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
3413 {
3414 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3415 	struct wmi_11d_scan_stop_cmd *cmd;
3416 	struct sk_buff *skb;
3417 	int ret;
3418 
3419 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3420 	if (!skb)
3421 		return -ENOMEM;
3422 
3423 	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
3424 	cmd->tlv_header =
3425 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
3426 				       sizeof(*cmd));
3427 
3428 	cmd->vdev_id = cpu_to_le32(vdev_id);
3429 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
3430 
3431 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3432 		   "send 11d scan stop vdev id %d\n",
3433 		   vdev_id);
3434 
3435 	if (ret) {
3436 		ath12k_warn(ar->ab,
3437 			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3438 		dev_kfree_skb(skb);
3439 	}
3440 
3441 	return ret;
3442 }
3443 
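/* Enable TWT (Target Wake Time) support in firmware for a pdev. All
 * timers and thresholds are filled from the ATH12K_TWT_DEF_* driver
 * defaults; MBSSID-aware TWT is not configured yet (see TODO below).
 */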
3444 int
3445 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3446 {
3447 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3448 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3449 	struct wmi_twt_enable_params_cmd *cmd;
3450 	struct sk_buff *skb;
3451 	int ret, len;
3452 
3453 	len = sizeof(*cmd);
3454 
3455 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3456 	if (!skb)
3457 		return -ENOMEM;
3458 
3459 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3460 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3461 						 len);
3462 	cmd->pdev_id = cpu_to_le32(pdev_id);
3463 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3464 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3465 	cmd->congestion_thresh_setup =
3466 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3467 	cmd->congestion_thresh_teardown =
3468 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3469 	cmd->congestion_thresh_critical =
3470 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3471 	cmd->interference_thresh_teardown =
3472 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3473 	cmd->interference_thresh_setup =
3474 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3475 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3476 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3477 	cmd->no_of_bcast_mcast_slots =
3478 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3479 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3480 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3481 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3482 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3483 	cmd->remove_sta_slot_interval =
3484 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3485 	/* TODO add MBSSID support */
3486 	cmd->mbss_support = 0;
3487 
3488 	ret = ath12k_wmi_cmd_send(wmi, skb,
3489 				  WMI_TWT_ENABLE_CMDID);
3490 	if (ret) {
3491 		ath12k_warn(ab, "failed to send WMI_TWT_ENABLE_CMDID: %d\n", ret);
3492 		dev_kfree_skb(skb);
3493 	}
3494 	return ret;
3495 }
3496 
3497 int
3498 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3499 {
3500 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3501 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3502 	struct wmi_twt_disable_params_cmd *cmd;
3503 	struct sk_buff *skb;
3504 	int ret, len;
3505 
3506 	len = sizeof(*cmd);
3507 
3508 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3509 	if (!skb)
3510 		return -ENOMEM;
3511 
3512 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3513 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3514 						 len);
3515 	cmd->pdev_id = cpu_to_le32(pdev_id);
3516 
3517 	ret = ath12k_wmi_cmd_send(wmi, skb,
3518 				  WMI_TWT_DISABLE_CMDID);
3519 	if (ret) {
3520 		ath12k_warn(ab, "failed to send WMI_TWT_DISABLE_CMDID: %d\n", ret);
3521 		dev_kfree_skb(skb);
3522 	}
3523 	return ret;
3524 }
3525 
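/* Configure OBSS packet-detect (spatial reuse) thresholds for a vdev. The
 * min/max offsets are signed values on the WMI interface, hence the
 * a_cpu_to_sle32() conversions.
 */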
3526 int
3527 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3528 			     struct ieee80211_he_obss_pd *he_obss_pd)
3529 {
3530 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3531 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3532 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3533 	struct sk_buff *skb;
3534 	int ret, len;
3535 
3536 	len = sizeof(*cmd);
3537 
3538 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3539 	if (!skb)
3540 		return -ENOMEM;
3541 
3542 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3543 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3544 						 len);
3545 	cmd->vdev_id = cpu_to_le32(vdev_id);
3546 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3547 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3548 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3549 
3550 	ret = ath12k_wmi_cmd_send(wmi, skb,
3551 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3552 	if (ret) {
3553 		ath12k_warn(ab,
3554 			    "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID: %d\n", ret);
3555 		dev_kfree_skb(skb);
3556 	}
3557 	return ret;
3558 }
3559 
3560 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3561 				  u8 bss_color, u32 period,
3562 				  bool enable)
3563 {
3564 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3565 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3566 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3567 	struct sk_buff *skb;
3568 	int ret, len;
3569 
3570 	len = sizeof(*cmd);
3571 
3572 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3573 	if (!skb)
3574 		return -ENOMEM;
3575 
3576 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3577 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3578 						 len);
3579 	cmd->vdev_id = cpu_to_le32(vdev_id);
3580 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3581 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3582 	cmd->current_bss_color = cpu_to_le32(bss_color);
3583 	cmd->detection_period_ms = cpu_to_le32(period);
3584 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3585 	cmd->free_slot_expiry_time_ms = 0;
3586 	cmd->flags = 0;
3587 
3588 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3589 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3590 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color,
3591 		   period, ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3592 
3593 	ret = ath12k_wmi_cmd_send(wmi, skb,
3594 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3595 	if (ret) {
3596 		ath12k_warn(ab, "failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID: %d\n", ret);
3597 		dev_kfree_skb(skb);
3598 	}
3599 	return ret;
3600 }
3601 
3602 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3603 						bool enable)
3604 {
3605 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3606 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3607 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3608 	struct sk_buff *skb;
3609 	int ret, len;
3610 
3611 	len = sizeof(*cmd);
3612 
3613 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3614 	if (!skb)
3615 		return -ENOMEM;
3616 
3617 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3618 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3619 						 len);
3620 	cmd->vdev_id = cpu_to_le32(vdev_id);
3621 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3622 
3623 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3624 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3625 		   vdev_id, enable);
3626 
3627 	ret = ath12k_wmi_cmd_send(wmi, skb,
3628 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3629 	if (ret) {
3630 		ath12k_warn(ab, "failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID: %d\n", ret);
3631 		dev_kfree_skb(skb);
3632 	}
3633 	return ret;
3634 }
3635 
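/* Install the FILS discovery frame template for a vdev. The template is
 * appended as a 4-byte aligned WMI_TAG_ARRAY_BYTE TLV, with buf_len
 * carrying the unpadded frame length.
 */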
3636 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3637 				   struct sk_buff *tmpl)
3638 {
3639 	struct wmi_tlv *tlv;
3640 	struct sk_buff *skb;
3641 	void *ptr;
3642 	int ret, len;
3643 	size_t aligned_len;
3644 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3645 
3646 	aligned_len = roundup(tmpl->len, 4);
3647 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3648 
3649 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3650 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3651 
3652 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3653 	if (!skb)
3654 		return -ENOMEM;
3655 
3656 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3657 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3658 						 sizeof(*cmd));
3659 	cmd->vdev_id = cpu_to_le32(vdev_id);
3660 	cmd->buf_len = cpu_to_le32(tmpl->len);
3661 	ptr = skb->data + sizeof(*cmd);
3662 
3663 	tlv = ptr;
3664 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3665 	memcpy(tlv->value, tmpl->data, tmpl->len);
3666 
3667 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3668 	if (ret) {
3669 		ath12k_warn(ar->ab,
3670 			    "WMI vdev %i failed to send FILS discovery template command\n",
3671 			    vdev_id);
3672 		dev_kfree_skb(skb);
3673 	}
3674 	return ret;
3675 }
3676 
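/* Install the probe response template used for offloaded responses. The
 * command carries three TLVs: the fixed command, a bcn_prb_info structure
 * (capability and ERP fields left zero) and the template frame as a
 * padded byte array.
 */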
3677 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3678 			       struct sk_buff *tmpl)
3679 {
3680 	struct wmi_probe_tmpl_cmd *cmd;
3681 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3682 	struct wmi_tlv *tlv;
3683 	struct sk_buff *skb;
3684 	void *ptr;
3685 	int ret, len;
3686 	size_t aligned_len = roundup(tmpl->len, 4);
3687 
3688 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3689 		   "WMI vdev %i set probe response template\n", vdev_id);
3690 
3691 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3692 
3693 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3694 	if (!skb)
3695 		return -ENOMEM;
3696 
3697 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3698 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3699 						 sizeof(*cmd));
3700 	cmd->vdev_id = cpu_to_le32(vdev_id);
3701 	cmd->buf_len = cpu_to_le32(tmpl->len);
3702 
3703 	ptr = skb->data + sizeof(*cmd);
3704 
3705 	probe_info = ptr;
3706 	len = sizeof(*probe_info);
3707 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3708 							len);
3709 	probe_info->caps = 0;
3710 	probe_info->erp = 0;
3711 
3712 	ptr += sizeof(*probe_info);
3713 
3714 	tlv = ptr;
3715 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3716 	memcpy(tlv->value, tmpl->data, tmpl->len);
3717 
3718 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3719 	if (ret) {
3720 		ath12k_warn(ar->ab,
3721 			    "WMI vdev %i failed to send probe response template command\n",
3722 			    vdev_id);
3723 		dev_kfree_skb(skb);
3724 	}
3725 	return ret;
3726 }
3727 
3728 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3729 			      bool unsol_bcast_probe_resp_enabled)
3730 {
3731 	struct sk_buff *skb;
3732 	int ret, len;
3733 	struct wmi_fils_discovery_cmd *cmd;
3734 
3735 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3736 		   "WMI vdev %i set %s interval to %u TU\n",
3737 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3738 		   "unsolicited broadcast probe response" : "FILS discovery",
3739 		   interval);
3740 
3741 	len = sizeof(*cmd);
3742 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3743 	if (!skb)
3744 		return -ENOMEM;
3745 
3746 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3747 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3748 						 len);
3749 	cmd->vdev_id = cpu_to_le32(vdev_id);
3750 	cmd->interval = cpu_to_le32(interval);
3751 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3752 
3753 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3754 	if (ret) {
3755 		ath12k_warn(ar->ab,
3756 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3757 			    vdev_id);
3758 		dev_kfree_skb(skb);
3759 	}
3760 	return ret;
3761 }
3762 
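/* Derive per-radio band-to-mac frequency ranges from the HAL register
 * capabilities reported by firmware, according to the bands each pdev
 * supports.
 */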
3763 static void
3764 ath12k_fill_band_to_mac_param(struct ath12k_base  *soc,
3765 			      struct ath12k_wmi_pdev_band_arg *arg)
3766 {
3767 	u8 i;
3768 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3769 	struct ath12k_pdev *pdev;
3770 
3771 	for (i = 0; i < soc->num_radios; i++) {
3772 		pdev = &soc->pdevs[i];
3773 		hal_reg_cap = &soc->hal_reg_cap[i];
3774 		arg[i].pdev_id = pdev->pdev_id;
3775 
3776 		switch (pdev->cap.supported_bands) {
3777 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
3778 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3779 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3780 			break;
3781 		case WMI_HOST_WLAN_2GHZ_CAP:
3782 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3783 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3784 			break;
3785 		case WMI_HOST_WLAN_5GHZ_CAP:
3786 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3787 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3788 			break;
3789 		default:
3790 			break;
3791 		}
3792 	}
3793 }
3794 
3795 static void
3796 ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
3797 				struct ath12k_wmi_resource_config_params *wmi_cfg,
3798 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3799 {
3800 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3801 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3802 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3803 	wmi_cfg->num_offload_reorder_buffs =
3804 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3805 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3806 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3807 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3808 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3809 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3810 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3811 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3812 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3813 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3814 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3815 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3816 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3817 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3818 	wmi_cfg->roam_offload_max_ap_profiles =
3819 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3820 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3821 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3822 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3823 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3824 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3825 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3826 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3827 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3828 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3829 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3830 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3831 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3832 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3833 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3834 	wmi_cfg->num_tdls_conn_table_entries =
3835 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3836 	wmi_cfg->beacon_tx_offload_max_vdev =
3837 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3838 	wmi_cfg->num_multicast_filter_entries =
3839 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3840 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3841 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3842 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3843 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3844 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3845 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3846 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3847 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3848 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3849 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3850 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3851 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3852 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3853 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3854 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3855 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
3856 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3857 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3858 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3859 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3860 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3861 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3862 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3863 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3864 	if (ab->hw_params->reoq_lut_support)
3865 		wmi_cfg->host_service_flags |=
3866 			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
3867 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3868 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3869 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3870 }
3871 
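/* Build and send WMI_INIT. The command body is: the fixed init TLV, the
 * resource config TLV, an array TLV of host memory chunks (length zero
 * when there are none) and, unless hw_mode_id is WMI_HOST_HW_MODE_MAX, a
 * set-hw-mode TLV followed by one band-to-mac entry per radio.
 */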
3872 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3873 				struct ath12k_wmi_init_cmd_arg *arg)
3874 {
3875 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3876 	struct sk_buff *skb;
3877 	struct wmi_init_cmd *cmd;
3878 	struct ath12k_wmi_resource_config_params *cfg;
3879 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3880 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3881 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3882 	struct wmi_tlv *tlv;
3883 	int ret, len;
3884 	void *ptr;
3885 	u32 hw_mode_len = 0;
3886 	u16 idx;
3887 
3888 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3889 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3890 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3891 
3892 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3893 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3894 
3895 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3896 	if (!skb)
3897 		return -ENOMEM;
3898 
3899 	cmd = (struct wmi_init_cmd *)skb->data;
3900 
3901 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3902 						 sizeof(*cmd));
3903 
3904 	ptr = skb->data + sizeof(*cmd);
3905 	cfg = ptr;
3906 
3907 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
3908 
3909 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3910 						 sizeof(*cfg));
3911 
3912 	ptr += sizeof(*cfg);
3913 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3914 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3915 
3916 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3917 		host_mem_chunks[idx].tlv_header =
3918 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3919 					   len);
3920 
3921 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3922 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3923 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3924 
3925 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3926 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3927 			   arg->mem_chunks[idx].req_id,
3928 			   (u64)arg->mem_chunks[idx].paddr,
3929 			   arg->mem_chunks[idx].len);
3930 	}
3931 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3932 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3933 
3934 	/* Host memory chunk array TLV; len is zero when num_mem_chunks is zero */
3935 	tlv = ptr;
3936 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3937 	ptr += TLV_HDR_SIZE + len;
3938 
3939 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3940 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3941 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3942 							     sizeof(*hw_mode));
3943 
3944 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3945 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3946 
3947 		ptr += sizeof(*hw_mode);
3948 
3949 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3950 		tlv = ptr;
3951 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3952 
3953 		ptr += TLV_HDR_SIZE;
3954 		len = sizeof(*band_to_mac);
3955 
3956 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3957 			band_to_mac = (void *)ptr;
3958 
3959 			band_to_mac->tlv_header =
3960 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3961 						       len);
3962 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3963 			band_to_mac->start_freq =
3964 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3965 			band_to_mac->end_freq =
3966 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3967 			ptr += sizeof(*band_to_mac);
3968 		}
3969 	}
3970 
3971 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3972 	if (ret) {
3973 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3974 		dev_kfree_skb(skb);
3975 	}
3976 
3977 	return ret;
3978 }
3979 
3980 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3981 			    int pdev_id)
3982 {
3983 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3984 	struct sk_buff *skb;
3985 	int ret;
3986 
3987 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3988 	if (!skb)
3989 		return -ENOMEM;
3990 
3991 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3992 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3993 						 sizeof(*cmd));
3994 
3995 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3996 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3997 
3998 	cmd->pdev_id = cpu_to_le32(pdev_id);
3999 
4000 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4001 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4002 
4003 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4004 	if (ret) {
4005 		ath12k_warn(ar->ab,
4006 			    "failed to send lro cfg req wmi cmd\n");
4007 		goto err;
4008 	}
4009 
4010 	return 0;
4011 err:
4012 	dev_kfree_skb(skb);
4013 	return ret;
4014 }
4015 
4016 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4017 {
4018 	unsigned long time_left;
4019 
4020 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4021 						WMI_SERVICE_READY_TIMEOUT_HZ);
4022 	if (!time_left)
4023 		return -ETIMEDOUT;
4024 
4025 	return 0;
4026 }
4027 
4028 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4029 {
4030 	unsigned long time_left;
4031 
4032 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4033 						WMI_SERVICE_READY_TIMEOUT_HZ);
4034 	if (!time_left)
4035 		return -ETIMEDOUT;
4036 
4037 	return 0;
4038 }
4039 
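/* Request a hardware mode switch; the command is addressed to the SOC
 * pdev rather than an individual radio.
 */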
4040 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4041 			   enum wmi_host_hw_mode_config_type mode)
4042 {
4043 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4044 	struct sk_buff *skb;
4045 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4046 	int len;
4047 	int ret;
4048 
4049 	len = sizeof(*cmd);
4050 
4051 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4052 	if (!skb)
4053 		return -ENOMEM;
4054 
4055 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4056 
4057 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4058 						 sizeof(*cmd));
4059 
4060 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
4061 	cmd->hw_mode_index = cpu_to_le32(mode);
4062 
4063 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4064 	if (ret) {
4065 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4066 		dev_kfree_skb(skb);
4067 	}
4068 
4069 	return ret;
4070 }
4071 
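/* Fill the resource configuration from hw_params and issue WMI_INIT.
 * single_pdev_only targets use WMI_HOST_HW_MODE_MAX so that the explicit
 * hw-mode/band-to-mac TLVs are skipped.
 */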
4072 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
4073 {
4074 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4075 	struct ath12k_wmi_init_cmd_arg arg = {};
4076 
4077 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
4078 		     ab->wmi_ab.svc_map))
4079 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
4080 
4081 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
4082 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
4083 
4084 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
4085 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
4086 	arg.mem_chunks = wmi_ab->mem_chunks;
4087 
4088 	if (ab->hw_params->single_pdev_only)
4089 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4090 
4091 	arg.num_band_to_mac = ab->num_radios;
4092 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
4093 
4094 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
4095 
4096 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
4097 }
4098 
4099 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
4100 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
4101 {
4102 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
4103 	struct sk_buff *skb;
4104 	int ret;
4105 
4106 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4107 	if (!skb)
4108 		return -ENOMEM;
4109 
4110 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
4111 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
4112 						 sizeof(*cmd));
4113 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
4114 	cmd->scan_count = cpu_to_le32(arg->scan_count);
4115 	cmd->scan_period = cpu_to_le32(arg->scan_period);
4116 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
4117 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
4118 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
4119 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
4120 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
4121 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
4122 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
4123 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
4124 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
4125 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
4126 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
4127 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
4128 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
4129 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
4130 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
4131 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4132 
4133 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4134 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4135 		   arg->vdev_id);
4136 
4137 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4138 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4139 	if (ret) {
4140 		ath12k_warn(ar->ab,
4141 			    "failed to send spectral scan config wmi cmd\n");
4142 		goto err;
4143 	}
4144 
4145 	return 0;
4146 err:
4147 	dev_kfree_skb(skb);
4148 	return ret;
4149 }
4150 
4151 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4152 				    u32 trigger, u32 enable)
4153 {
4154 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4155 	struct sk_buff *skb;
4156 	int ret;
4157 
4158 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4159 	if (!skb)
4160 		return -ENOMEM;
4161 
4162 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4163 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4164 						 sizeof(*cmd));
4165 
4166 	cmd->vdev_id = cpu_to_le32(vdev_id);
4167 	cmd->trigger_cmd = cpu_to_le32(trigger);
4168 	cmd->enable_cmd = cpu_to_le32(enable);
4169 
4170 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4171 		   "WMI spectral enable cmd vdev id 0x%x\n",
4172 		   vdev_id);
4173 
4174 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4175 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4176 	if (ret) {
4177 		ath12k_warn(ar->ab,
4178 			    "failed to send spectral enable wmi cmd\n");
4179 		goto err;
4180 	}
4181 
4182 	return 0;
4183 err:
4184 	dev_kfree_skb(skb);
4185 	return ret;
4186 }
4187 
4188 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4189 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4190 {
4191 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4192 	struct sk_buff *skb;
4193 	int ret;
4194 
4195 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4196 	if (!skb)
4197 		return -ENOMEM;
4198 
4199 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4200 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4201 						 sizeof(*cmd));
4202 
4203 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4204 	cmd->module_id = cpu_to_le32(arg->module_id);
4205 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4206 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4207 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4208 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4209 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4210 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4211 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4212 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4213 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4214 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4215 
4216 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4217 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4218 		   arg->pdev_id);
4219 
4220 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4221 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4222 	if (ret) {
4223 		ath12k_warn(ar->ab,
4224 			    "failed to send dma ring cfg req wmi cmd\n");
4225 		goto err;
4226 	}
4227 
4228 	return 0;
4229 err:
4230 	dev_kfree_skb(skb);
4231 	return ret;
4232 }
4233 
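/* TLV iterators for the DMA ring buffer release event: count the buffer
 * entry and spectral metadata arrays while bounds-checking them against
 * the element counts from the fixed parameters.
 */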
4234 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4235 					  u16 tag, u16 len,
4236 					  const void *ptr, void *data)
4237 {
4238 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4239 
4240 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4241 		return -EPROTO;
4242 
4243 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4244 		return -ENOBUFS;
4245 
4246 	arg->num_buf_entry++;
4247 	return 0;
4248 }
4249 
4250 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4251 					 u16 tag, u16 len,
4252 					 const void *ptr, void *data)
4253 {
4254 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4255 
4256 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4257 		return -EPROTO;
4258 
4259 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4260 		return -ENOBUFS;
4261 
4262 	arg->num_meta++;
4263 
4264 	return 0;
4265 }
4266 
4267 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4268 				    u16 tag, u16 len,
4269 				    const void *ptr, void *data)
4270 {
4271 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4272 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4273 	u32 pdev_id;
4274 	int ret;
4275 
4276 	switch (tag) {
4277 	case WMI_TAG_DMA_BUF_RELEASE:
4278 		fixed = ptr;
4279 		arg->fixed = *fixed;
4280 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4281 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4282 		break;
4283 	case WMI_TAG_ARRAY_STRUCT:
4284 		if (!arg->buf_entry_done) {
4285 			arg->num_buf_entry = 0;
4286 			arg->buf_entry = ptr;
4287 
4288 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4289 						  ath12k_wmi_dma_buf_entry_parse,
4290 						  arg);
4291 			if (ret) {
4292 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4293 					    ret);
4294 				return ret;
4295 			}
4296 
4297 			arg->buf_entry_done = true;
4298 		} else if (!arg->meta_data_done) {
4299 			arg->num_meta = 0;
4300 			arg->meta_data = ptr;
4301 
4302 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4303 						  ath12k_wmi_dma_buf_meta_parse,
4304 						  arg);
4305 			if (ret) {
4306 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4307 					    ret);
4308 				return ret;
4309 			}
4310 
4311 			arg->meta_data_done = true;
4312 		}
4313 		break;
4314 	default:
4315 		break;
4316 	}
4317 	return 0;
4318 }
4319 
4320 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4321 						       struct sk_buff *skb)
4322 {
4323 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4324 	struct ath12k_dbring_buf_release_event param;
4325 	int ret;
4326 
4327 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4328 				  ath12k_wmi_dma_buf_parse,
4329 				  &arg);
4330 	if (ret) {
4331 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4332 		return;
4333 	}
4334 
4335 	param.fixed = arg.fixed;
4336 	param.buf_entry = arg.buf_entry;
4337 	param.num_buf_entry = arg.num_buf_entry;
4338 	param.meta_data = arg.meta_data;
4339 	param.num_meta = arg.num_meta;
4340 
4341 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4342 	if (ret) {
4343 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4344 		return;
4345 	}
4346 }
4347 
4348 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4349 					 u16 tag, u16 len,
4350 					 const void *ptr, void *data)
4351 {
4352 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4353 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4354 	u32 phy_map = 0;
4355 
4356 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4357 		return -EPROTO;
4358 
4359 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4360 		return -ENOBUFS;
4361 
4362 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4363 				   hw_mode_id);
4364 	svc_rdy_ext->n_hw_mode_caps++;
4365 
4366 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4367 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4368 
4369 	return 0;
4370 }
4371 
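/* Pick the preferred hardware mode: a first pass counts the advertised
 * modes and total phy ids, then the mode with the lowest
 * ath12k_hw_mode_pri_map value (highest priority) is selected.
 */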
4372 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4373 				   u16 len, const void *ptr, void *data)
4374 {
4375 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4376 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4377 	enum wmi_host_hw_mode_config_type mode, pref;
4378 	u32 i;
4379 	int ret;
4380 
4381 	svc_rdy_ext->n_hw_mode_caps = 0;
4382 	svc_rdy_ext->hw_mode_caps = ptr;
4383 
4384 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4385 				  ath12k_wmi_hw_mode_caps_parse,
4386 				  svc_rdy_ext);
4387 	if (ret) {
4388 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4389 		return ret;
4390 	}
4391 
4392 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4393 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4394 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4395 
4396 		if (mode >= WMI_HOST_HW_MODE_MAX)
4397 			continue;
4398 
4399 		pref = soc->wmi_ab.preferred_hw_mode;
4400 
4401 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4402 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4403 			soc->wmi_ab.preferred_hw_mode = mode;
4404 		}
4405 	}
4406 
4407 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
4408 		   soc->wmi_ab.preferred_hw_mode);
4409 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4410 		return -EINVAL;
4411 
4412 	return 0;
4413 }
4414 
4415 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4416 					 u16 tag, u16 len,
4417 					 const void *ptr, void *data)
4418 {
4419 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4420 
4421 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4422 		return -EPROTO;
4423 
4424 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4425 		return -ENOBUFS;
4426 
4427 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4428 	if (!svc_rdy_ext->n_mac_phy_caps) {
4429 		svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
4430 						    sizeof(*svc_rdy_ext->mac_phy_caps),
						    GFP_ATOMIC);
4431 		if (!svc_rdy_ext->mac_phy_caps)
4432 			return -ENOMEM;
4433 	}
4434 
4435 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4436 	svc_rdy_ext->n_mac_phy_caps++;
4437 	return 0;
4438 }
4439 
4440 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4441 					     u16 tag, u16 len,
4442 					     const void *ptr, void *data)
4443 {
4444 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4445 
4446 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4447 		return -EPROTO;
4448 
4449 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4450 		return -ENOBUFS;
4451 
4452 	svc_rdy_ext->n_ext_hal_reg_caps++;
4453 	return 0;
4454 }
4455 
4456 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4457 				       u16 len, const void *ptr, void *data)
4458 {
4459 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4460 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4461 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4462 	int ret;
4463 	u32 i;
4464 
4465 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4466 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4467 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4468 				  ath12k_wmi_ext_hal_reg_caps_parse,
4469 				  svc_rdy_ext);
4470 	if (ret) {
4471 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4472 		return ret;
4473 	}
4474 
4475 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4476 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4477 						      svc_rdy_ext->soc_hal_reg_caps,
4478 						      svc_rdy_ext->ext_hal_reg_caps, i,
4479 						      &reg_cap);
4480 		if (ret) {
4481 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4482 			return ret;
4483 		}
4484 
4485 		if (reg_cap.phy_id >= MAX_RADIOS) {
4486 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4487 			return -EINVAL;
4488 		}
4489 
4490 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4491 	}
4492 	return 0;
4493 }
4494 
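/* Enumerate radios from the preferred hw mode's phy_id_map, populating
 * one pdev per set bit. single_pdev_only targets fold every mac/phy
 * capability into pdev 0 instead.
 */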
4495 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4496 						 u16 len, const void *ptr,
4497 						 void *data)
4498 {
4499 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4500 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4501 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4502 	u32 phy_id_map;
4503 	int pdev_index = 0;
4504 	int ret;
4505 
4506 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4507 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4508 
4509 	soc->num_radios = 0;
4510 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4511 	soc->fw_pdev_count = 0;
4512 
4513 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4514 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4515 							    svc_rdy_ext,
4516 							    hw_mode_id, soc->num_radios,
4517 							    &soc->pdevs[pdev_index]);
4518 		if (ret) {
4519 			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
4520 				    soc->num_radios);
4521 			return ret;
4522 		}
4523 
4524 		soc->num_radios++;
4525 
4526 		/* For single_pdev_only targets,
4527 		 * save mac_phy capability in the same pdev
4528 		 */
4529 		if (soc->hw_params->single_pdev_only)
4530 			pdev_index = 0;
4531 		else
4532 			pdev_index = soc->num_radios;
4533 
4534 		/* TODO: mac_phy_cap prints */
4535 		phy_id_map >>= 1;
4536 	}
4537 
4538 	if (soc->hw_params->single_pdev_only) {
4539 		soc->num_radios = 1;
4540 		soc->pdevs[0].pdev_id = 0;
4541 	}
4542 
4543 	return 0;
4544 }
4545 
4546 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4547 					  u16 tag, u16 len,
4548 					  const void *ptr, void *data)
4549 {
4550 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4551 
4552 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4553 		return -EPROTO;
4554 
4555 	parse->n_dma_ring_caps++;
4556 	return 0;
4557 }
4558 
4559 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4560 					u32 num_cap)
4561 {
4562 	size_t sz;
4563 	void *ptr;
4564 
4565 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4566 	ptr = kzalloc(sz, GFP_ATOMIC);
4567 	if (!ptr)
4568 		return -ENOMEM;
4569 
4570 	ab->db_caps = ptr;
4571 	ab->num_db_cap = num_cap;
4572 
4573 	return 0;
4574 }
4575 
4576 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4577 {
4578 	kfree(ab->db_caps);
4579 	ab->db_caps = NULL;
4580 	ab->num_db_cap = 0;
4581 }
4582 
4583 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4584 				    u16 len, const void *ptr, void *data)
4585 {
4586 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4587 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4588 	struct ath12k_dbring_cap *dir_buff_caps;
4589 	int ret;
4590 	u32 i;
4591 
4592 	dma_caps_parse->n_dma_ring_caps = 0;
4593 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4594 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4595 				  ath12k_wmi_dma_ring_caps_parse,
4596 				  dma_caps_parse);
4597 	if (ret) {
4598 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4599 		return ret;
4600 	}
4601 
4602 	if (!dma_caps_parse->n_dma_ring_caps)
4603 		return 0;
4604 
4605 	if (ab->num_db_cap) {
4606 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4607 		return 0;
4608 	}
4609 
4610 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4611 	if (ret)
4612 		return ret;
4613 
4614 	dir_buff_caps = ab->db_caps;
4615 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4616 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4617 			ath12k_warn(ab, "Invalid module id %d\n",
4618 				    le32_to_cpu(dma_caps[i].module_id));
4619 			ret = -EINVAL;
4620 			goto free_dir_buff;
4621 		}
4622 
4623 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4624 		dir_buff_caps[i].pdev_id =
4625 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4626 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4627 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4628 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4629 	}
4630 
4631 	return 0;
4632 
4633 free_dir_buff:
4634 	ath12k_wmi_free_dbring_caps(ab);
4635 	return ret;
4636 }
4637 
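/* SERVICE_READY_EXT carries several anonymous WMI_TAG_ARRAY_STRUCT TLVs
 * in a fixed order; the *_done flags in the parse state record how far
 * parsing has progressed so each array is handed to the right handler.
 */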
4638 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4639 					u16 tag, u16 len,
4640 					const void *ptr, void *data)
4641 {
4642 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4643 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4644 	int ret;
4645 
4646 	switch (tag) {
4647 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4648 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4649 						&svc_rdy_ext->arg);
4650 		if (ret) {
4651 			ath12k_warn(ab, "unable to extract ext params\n");
4652 			return ret;
4653 		}
4654 		break;
4655 
4656 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4657 		svc_rdy_ext->hw_caps = ptr;
4658 		svc_rdy_ext->arg.num_hw_modes =
4659 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4660 		break;
4661 
4662 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4663 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4664 							    svc_rdy_ext);
4665 		if (ret)
4666 			return ret;
4667 		break;
4668 
4669 	case WMI_TAG_ARRAY_STRUCT:
4670 		if (!svc_rdy_ext->hw_mode_done) {
4671 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4672 			if (ret)
4673 				return ret;
4674 
4675 			svc_rdy_ext->hw_mode_done = true;
4676 		} else if (!svc_rdy_ext->mac_phy_done) {
4677 			svc_rdy_ext->n_mac_phy_caps = 0;
4678 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4679 						  ath12k_wmi_mac_phy_caps_parse,
4680 						  svc_rdy_ext);
4681 			if (ret) {
4682 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4683 				return ret;
4684 			}
4685 
4686 			svc_rdy_ext->mac_phy_done = true;
4687 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4688 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4689 			if (ret)
4690 				return ret;
4691 
4692 			svc_rdy_ext->ext_hal_reg_done = true;
4693 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4694 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4695 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4696 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4697 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4698 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4699 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4700 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4701 						       &svc_rdy_ext->dma_caps_parse);
4702 			if (ret)
4703 				return ret;
4704 
4705 			svc_rdy_ext->dma_ring_cap_done = true;
4706 		}
4707 		break;
4708 
4709 	default:
4710 		break;
4711 	}
4712 	return 0;
4713 }
4714 
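/* Handle SERVICE_READY_EXT. service_ready is completed here only when
 * the firmware does not support the EXT2 message; otherwise
 * ath12k_service_ready_ext2_event() completes it.
 */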
4715 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4716 					  struct sk_buff *skb)
4717 {
4718 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4719 	int ret;
4720 
4721 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4722 				  ath12k_wmi_svc_rdy_ext_parse,
4723 				  &svc_rdy_ext);
4724 	if (ret) {
4725 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4726 		goto err;
4727 	}
4728 
4729 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4730 		complete(&ab->wmi_ab.service_ready);
4731 
4732 	kfree(svc_rdy_ext.mac_phy_caps);
4733 	return 0;
4734 
4735 err:
4736 	ath12k_wmi_free_dbring_caps(ab);
4737 	return ret;
4738 }
4739 
4740 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4741 				      const void *ptr,
4742 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4743 {
4744 	const struct wmi_service_ready_ext2_event *ev = ptr;
4745 
4746 	if (!ev)
4747 		return -EINVAL;
4748 
4749 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4750 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4751 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4752 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4753 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4754 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4755 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4756 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4757 	return 0;
4758 }
4759 
4760 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4761 				      const __le32 cap_mac_info[],
4762 				      const __le32 cap_phy_info[],
4763 				      const __le32 supp_mcs[],
4764 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4765 				       __le32 cap_info_internal)
4766 {
4767 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4768 	u32 support_320mhz;
4769 	u8 i;
4770 
4771 	if (band == NL80211_BAND_6GHZ)
4772 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4773 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4774 
4775 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4776 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4777 
4778 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4779 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4780 
4781 	if (band == NL80211_BAND_6GHZ)
4782 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4783 
4784 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4785 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4786 	if (band != NL80211_BAND_2GHZ) {
4787 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4788 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4789 	}
4790 
4791 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4792 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4793 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4794 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4795 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4796 
4797 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4798 }
4799 
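/* Fill per-band EHT capabilities for a pdev. On single_pdev_only targets
 * the matching fw_pdev supplies the supported bands; the 5 GHz
 * capabilities are reused for the 6 GHz band, preserving the 320 MHz
 * support flag collected earlier.
 */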
4800 static int
4801 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4802 				      const struct ath12k_wmi_caps_ext_params *caps,
4803 				      struct ath12k_pdev *pdev)
4804 {
4805 	struct ath12k_band_cap *cap_band;
4806 	u32 bands, support_320mhz;
4807 	int i;
4808 
4809 	if (ab->hw_params->single_pdev_only) {
4810 		if (le32_to_cpu(caps->hw_mode_id) == WMI_HOST_HW_MODE_SINGLE) {
4811 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4812 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4813 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4814 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4815 			return 0;
4816 		}
4817 
4818 		for (i = 0; i < ab->fw_pdev_count; i++) {
4819 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4820 
4821 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4822 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4823 				bands = fw_pdev->supported_bands;
4824 				break;
4825 			}
4826 		}
4827 
4828 		if (i == ab->fw_pdev_count)
4829 			return -EINVAL;
4830 	} else {
4831 		bands = pdev->cap.supported_bands;
4832 	}
4833 
4834 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
4835 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4836 					  caps->eht_cap_mac_info_2ghz,
4837 					  caps->eht_cap_phy_info_2ghz,
4838 					  caps->eht_supp_mcs_ext_2ghz,
4839 					  &caps->eht_ppet_2ghz,
4840 					  caps->eht_cap_info_internal);
4841 	}
4842 
4843 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
4844 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4845 					  caps->eht_cap_mac_info_5ghz,
4846 					  caps->eht_cap_phy_info_5ghz,
4847 					  caps->eht_supp_mcs_ext_5ghz,
4848 					  &caps->eht_ppet_5ghz,
4849 					  caps->eht_cap_info_internal);
4850 
4851 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4852 					  caps->eht_cap_mac_info_5ghz,
4853 					  caps->eht_cap_phy_info_5ghz,
4854 					  caps->eht_supp_mcs_ext_5ghz,
4855 					  &caps->eht_ppet_5ghz,
4856 					  caps->eht_cap_info_internal);
4857 	}
4858 
4859 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4860 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4861 
4862 	return 0;
4863 }
4864 
4865 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4866 					   u16 len, const void *ptr,
4867 					   void *data)
4868 {
4869 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4870 	int i = 0, ret;
4871 
4872 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4873 		return -EPROTO;
4874 
4875 	if (ab->hw_params->single_pdev_only) {
4876 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4877 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4878 			return 0;
4879 	} else {
4880 		for (i = 0; i < ab->num_radios; i++) {
4881 			if (ab->pdevs[i].pdev_id ==
4882 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4883 				break;
4884 		}
4885 
4886 		if (i == ab->num_radios)
4887 			return -EINVAL;
4888 	}
4889 
4890 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4891 	if (ret) {
4892 		ath12k_warn(ab,
4893 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4894 			    ab->pdevs[i].pdev_id, ret);
4895 		return ret;
4896 	}
4897 
4898 	return 0;
4899 }
4900 
4901 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4902 					 u16 tag, u16 len,
4903 					 const void *ptr, void *data)
4904 {
4905 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4906 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4907 	int ret;
4908 
4909 	switch (tag) {
4910 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4911 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4912 						 &parse->arg);
4913 		if (ret) {
4914 			ath12k_warn(ab,
4915 				    "failed to extract wmi service ready ext2 parameters: %d\n",
4916 				    ret);
4917 			return ret;
4918 		}
4919 		break;
4920 
4921 	case WMI_TAG_ARRAY_STRUCT:
4922 		if (!parse->dma_ring_cap_done) {
4923 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4924 						       &parse->dma_caps_parse);
4925 			if (ret)
4926 				return ret;
4927 
4928 			parse->dma_ring_cap_done = true;
4929 		} else if (!parse->spectral_bin_scaling_done) {
4930 			/* TODO: This is a place-holder as WMI tag for
4931 			 * spectral scaling is before
4932 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
4933 			 */
4934 			parse->spectral_bin_scaling_done = true;
4935 		} else if (!parse->mac_phy_caps_ext_done) {
4936 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4937 						  ath12k_wmi_tlv_mac_phy_caps_ext,
4938 						  parse);
4939 			if (ret) {
4940 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4941 					    ret);
4942 				return ret;
4943 			}
4944 
4945 			parse->mac_phy_caps_ext_done = true;
4946 		}
4947 		break;
4948 	default:
4949 		break;
4950 	}
4951 
4952 	return 0;
4953 }
4954 
4955 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4956 					   struct sk_buff *skb)
4957 {
4958 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4959 	int ret;
4960 
4961 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4962 				  ath12k_wmi_svc_rdy_ext2_parse,
4963 				  &svc_rdy_ext2);
4964 	if (ret) {
4965 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4966 		goto err;
4967 	}
4968 
4969 	complete(&ab->wmi_ab.service_ready);
4970 
4971 	return 0;
4972 
4973 err:
4974 	ath12k_wmi_free_dbring_caps(ab);
4975 	return ret;
4976 }
4977 
4978 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4979 					   struct wmi_vdev_start_resp_event *vdev_rsp)
4980 {
4981 	const void **tb;
4982 	const struct wmi_vdev_start_resp_event *ev;
4983 	int ret;
4984 
4985 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4986 	if (IS_ERR(tb)) {
4987 		ret = PTR_ERR(tb);
4988 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4989 		return ret;
4990 	}
4991 
4992 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4993 	if (!ev) {
4994 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
4995 		kfree(tb);
4996 		return -EPROTO;
4997 	}
4998 
4999 	*vdev_rsp = *ev;
5000 
5001 	kfree(tb);
5002 	return 0;
5003 }
5004 
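/* Convert a WMI array of ext reg rules into host ath12k_reg_rule
 * entries. Returns a zeroed, atomically allocated array owned by the
 * caller (freed in the reg chan list event handler below), or NULL on
 * allocation failure.
 */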
5005 static struct ath12k_reg_rule
5006 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5007 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5008 {
5009 	struct ath12k_reg_rule *reg_rule_ptr;
5010 	u32 count;
5011 
5012 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
5013 			       GFP_ATOMIC);
5014 
5015 	if (!reg_rule_ptr)
5016 		return NULL;
5017 
5018 	for (count = 0; count < num_reg_rules; count++) {
5019 		reg_rule_ptr[count].start_freq =
5020 			le32_get_bits(wmi_reg_rule[count].freq_info,
5021 				      REG_RULE_START_FREQ);
5022 		reg_rule_ptr[count].end_freq =
5023 			le32_get_bits(wmi_reg_rule[count].freq_info,
5024 				      REG_RULE_END_FREQ);
5025 		reg_rule_ptr[count].max_bw =
5026 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5027 				      REG_RULE_MAX_BW);
5028 		reg_rule_ptr[count].reg_power =
5029 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5030 				      REG_RULE_REG_PWR);
5031 		reg_rule_ptr[count].ant_gain =
5032 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5033 				      REG_RULE_ANT_GAIN);
5034 		reg_rule_ptr[count].flags =
5035 			le32_get_bits(wmi_reg_rule[count].flag_info,
5036 				      REG_RULE_FLAGS);
5037 		reg_rule_ptr[count].psd_flag =
5038 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5039 				      REG_RULE_PSD_INFO);
5040 		reg_rule_ptr[count].psd_eirp =
5041 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5042 				      REG_RULE_PSD_EIRP);
5043 	}
5044 
5045 	return reg_rule_ptr;
5046 }
5047 
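/* Count the rules in a 5 GHz rule list whose start frequency lies in
 * the 6 GHz range; the caller skips these duplicated rules.
 */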
5048 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5049 					    u32 num_reg_rules)
5050 {
5051 	u8 num_invalid_5ghz_rules = 0;
5052 	u32 count, start_freq;
5053 
5054 	for (count = 0; count < num_reg_rules; count++) {
5055 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5056 
5057 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5058 			num_invalid_5ghz_rules++;
5059 	}
5060 
5061 	return num_invalid_5ghz_rules;
5062 }
5063 
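/* Pull a WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT into reg_info: validate
 * the advertised per-band rule counts, then allocate and fill the
 * 2 GHz, 5 GHz and 6 GHz (AP and client) rule arrays from the TLV
 * data following the fixed event.
 */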
5064 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5065 						   struct sk_buff *skb,
5066 						   struct ath12k_reg_info *reg_info)
5067 {
5068 	const void **tb;
5069 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5070 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5071 	u32 num_2g_reg_rules, num_5g_reg_rules;
5072 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5073 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5074 	u8 num_invalid_5ghz_ext_rules;
5075 	u32 total_reg_rules = 0;
5076 	int ret, i, j;
5077 
5078 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5079 
5080 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5081 	if (IS_ERR(tb)) {
5082 		ret = PTR_ERR(tb);
5083 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5084 		return ret;
5085 	}
5086 
5087 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5088 	if (!ev) {
5089 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5090 		kfree(tb);
5091 		return -EPROTO;
5092 	}
5093 
5094 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5095 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5096 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5097 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5098 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5099 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5100 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5101 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5102 
5103 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5104 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5105 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5106 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5107 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5108 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5109 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5110 	}
5111 
5112 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5113 	total_reg_rules += num_2g_reg_rules;
5114 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5115 	total_reg_rules += num_5g_reg_rules;
5116 
5117 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
5118 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
5119 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
5120 		kfree(tb);
5121 		return -EINVAL;
5122 	}
5123 
5124 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5125 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
5126 
5127 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
5128 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
5129 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
5130 			kfree(tb);
5131 			return -EINVAL;
5132 		}
5133 
5134 		total_reg_rules += num_6g_reg_rules_ap[i];
5135 	}
5136 
5137 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5138 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5139 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5140 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5141 
5142 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5143 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5144 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5145 
5146 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5147 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5148 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5149 
5150 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
5151 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
5152 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
5153 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client type %d\n",
5154 				    i);
5155 			kfree(tb);
5156 			return -EINVAL;
5157 		}
5158 	}
5159 
5160 	if (!total_reg_rules) {
5161 		ath12k_warn(ab, "No reg rules available\n");
5162 		kfree(tb);
5163 		return -EINVAL;
5164 	}
5165 
5166 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5167 
5168 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5169 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5170 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5171 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5172 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5173 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5174 
5175 	switch (le32_to_cpu(ev->status_code)) {
5176 	case WMI_REG_SET_CC_STATUS_PASS:
5177 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5178 		break;
5179 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5180 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5181 		break;
5182 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5183 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5184 		break;
5185 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5186 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5187 		break;
5188 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5189 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5190 		break;
5191 	case WMI_REG_SET_CC_STATUS_FAIL:
5192 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5193 		break;
5194 	}
5195 
5196 	reg_info->is_ext_reg_event = true;
5197 
5198 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5199 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5200 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5201 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5202 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5203 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5204 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5205 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5206 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5207 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5208 
5209 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5210 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5211 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5212 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5213 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5214 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5215 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5216 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5217 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5218 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5219 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5220 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5221 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5222 	}
5223 
5224 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5225 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5226 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5227 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5228 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5229 		   reg_info->phybitmap);
5230 
5231 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5232 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5233 		   num_2g_reg_rules, num_5g_reg_rules);
5234 
5235 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5236 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5237 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5238 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5239 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5240 
5241 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5242 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5243 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5244 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5245 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5246 
5247 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5248 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5249 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5250 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5251 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5252 
5253 	ext_wmi_reg_rule =
5254 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5255 			+ sizeof(*ev)
5256 			+ sizeof(struct wmi_tlv));
5257 
5258 	if (num_2g_reg_rules) {
5259 		reg_info->reg_rules_2g_ptr =
5260 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5261 						      ext_wmi_reg_rule);
5262 
5263 		if (!reg_info->reg_rules_2g_ptr) {
5264 			kfree(tb);
5265 			ath12k_warn(ab, "Unable to allocate memory for 2g rules\n");
5266 			return -ENOMEM;
5267 		}
5268 	}
5269 
5270 	ext_wmi_reg_rule += num_2g_reg_rules;
5271 
5272 	/* For a few countries the firmware includes 6 GHz reg rules
5273 	 * in the 5 GHz rule list in addition to the separate 6 GHz
5274 	 * rules. Having the same 6 GHz rule in both the 5 GHz and
5275 	 * 6 GHz rule lists makes the intersect check true, so the
5276 	 * same rules would show up multiple times in the iw output.
5277 	 * Hence, skip 6 GHz rules found in the 5 GHz reg rule list.
5278 	 */
5279 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5280 								       num_5g_reg_rules);
5281 
5282 	if (num_invalid_5ghz_ext_rules) {
5283 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5284 			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
5285 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5286 			   num_invalid_5ghz_ext_rules);
5287 
5288 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
5289 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5290 	}
5291 
5292 	if (num_5g_reg_rules) {
5293 		reg_info->reg_rules_5g_ptr =
5294 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5295 						      ext_wmi_reg_rule);
5296 
5297 		if (!reg_info->reg_rules_5g_ptr) {
5298 			kfree(tb);
5299 			ath12k_warn(ab, "Unable to allocate memory for 5g rules\n");
5300 			return -ENOMEM;
5301 		}
5302 	}
5303 
5304 	/* The number of 5 GHz reg rules was adjusted above, but the
5305 	 * skipped rules still occupy entries in ext_wmi_reg_rule and
5306 	 * must be stepped over here.
5307 	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
5308 	 */
5309 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5310 
5311 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5312 		reg_info->reg_rules_6g_ap_ptr[i] =
5313 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5314 						      ext_wmi_reg_rule);
5315 
5316 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5317 			kfree(tb);
5318 			ath12k_warn(ab, "Unable to allocate memory for 6g ap rules\n");
5319 			return -ENOMEM;
5320 		}
5321 
5322 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5323 	}
5324 
5325 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5326 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5327 			reg_info->reg_rules_6g_client_ptr[j][i] =
5328 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5329 							      ext_wmi_reg_rule);
5330 
5331 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5332 				kfree(tb);
5333 				ath12k_warn(ab, "Unable to allocate memory for 6g client rules\n");
5334 				return -ENOMEM;
5335 			}
5336 
5337 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5338 		}
5339 	}
5340 
5341 	reg_info->client_type = le32_to_cpu(ev->client_type);
5342 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5343 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5344 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5345 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5346 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5347 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5348 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5349 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5350 
5351 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5352 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5353 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5354 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5355 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5356 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5357 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5358 	}
5359 
5360 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5361 
5362 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5363 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5364 
5365 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5366 
5367 	kfree(tb);
5368 	return 0;
5369 }
5370 
5371 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5372 					struct wmi_peer_delete_resp_event *peer_del_resp)
5373 {
5374 	const void **tb;
5375 	const struct wmi_peer_delete_resp_event *ev;
5376 	int ret;
5377 
5378 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5379 	if (IS_ERR(tb)) {
5380 		ret = PTR_ERR(tb);
5381 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5382 		return ret;
5383 	}
5384 
5385 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5386 	if (!ev) {
5387 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5388 		kfree(tb);
5389 		return -EPROTO;
5390 	}
5391 
5392 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5393 
5394 	peer_del_resp->vdev_id = ev->vdev_id;
5395 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5396 			ev->peer_macaddr.addr);
5397 
5398 	kfree(tb);
5399 	return 0;
5400 }
5401 
5402 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5403 					struct sk_buff *skb,
5404 					u32 *vdev_id)
5405 {
5406 	const void **tb;
5407 	const struct wmi_vdev_delete_resp_event *ev;
5408 	int ret;
5409 
5410 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5411 	if (IS_ERR(tb)) {
5412 		ret = PTR_ERR(tb);
5413 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5414 		return ret;
5415 	}
5416 
5417 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5418 	if (!ev) {
5419 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5420 		kfree(tb);
5421 		return -EPROTO;
5422 	}
5423 
5424 	*vdev_id = le32_to_cpu(ev->vdev_id);
5425 
5426 	kfree(tb);
5427 	return 0;
5428 }
5429 
5430 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5431 					struct sk_buff *skb,
5432 					u32 *vdev_id, u32 *tx_status)
5433 {
5434 	const void **tb;
5435 	const struct wmi_bcn_tx_status_event *ev;
5436 	int ret;
5437 
5438 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5439 	if (IS_ERR(tb)) {
5440 		ret = PTR_ERR(tb);
5441 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5442 		return ret;
5443 	}
5444 
5445 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5446 	if (!ev) {
5447 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
5448 		kfree(tb);
5449 		return -EPROTO;
5450 	}
5451 
5452 	*vdev_id = le32_to_cpu(ev->vdev_id);
5453 	*tx_status = le32_to_cpu(ev->tx_status);
5454 
5455 	kfree(tb);
5456 	return 0;
5457 }
5458 
5459 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5460 					      u32 *vdev_id)
5461 {
5462 	const void **tb;
5463 	const struct wmi_vdev_stopped_event *ev;
5464 	int ret;
5465 
5466 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5467 	if (IS_ERR(tb)) {
5468 		ret = PTR_ERR(tb);
5469 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5470 		return ret;
5471 	}
5472 
5473 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5474 	if (!ev) {
5475 		ath12k_warn(ab, "failed to fetch vdev stop ev");
5476 		kfree(tb);
5477 		return -EPROTO;
5478 	}
5479 
5480 	*vdev_id = le32_to_cpu(ev->vdev_id);
5481 
5482 	kfree(tb);
5483 	return 0;
5484 }
5485 
5486 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
5487 					u16 tag, u16 len,
5488 					const void *ptr, void *data)
5489 {
5490 	struct wmi_tlv_mgmt_rx_parse *parse = data;
5491 
5492 	switch (tag) {
5493 	case WMI_TAG_MGMT_RX_HDR:
5494 		parse->fixed = ptr;
5495 		break;
5496 	case WMI_TAG_ARRAY_BYTE:
5497 		if (!parse->frame_buf_done) {
5498 			parse->frame_buf = ptr;
5499 			parse->frame_buf_done = true;
5500 		}
5501 		break;
5502 	}
5503 	return 0;
5504 }
5505 
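/* Parse the WMI mgmt rx TLVs into hdr and reposition the skb so that
 * its data points at the received 802.11 frame.
 */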
5506 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
5507 					  struct sk_buff *skb,
5508 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
5509 {
5510 	struct wmi_tlv_mgmt_rx_parse parse = { };
5511 	const struct ath12k_wmi_mgmt_rx_params *ev;
5512 	const u8 *frame;
5513 	int i, ret;
5514 
5515 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5516 				  ath12k_wmi_tlv_mgmt_rx_parse,
5517 				  &parse);
5518 	if (ret) {
5519 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
5520 		return ret;
5521 	}
5522 
5523 	ev = parse.fixed;
5524 	frame = parse.frame_buf;
5525 
5526 	if (!ev || !frame) {
5527 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
5528 		return -EPROTO;
5529 	}
5530 
5531 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
5532 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
5533 	hdr->channel = le32_to_cpu(ev->channel);
5534 	hdr->snr = le32_to_cpu(ev->snr);
5535 	hdr->rate = le32_to_cpu(ev->rate);
5536 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
5537 	hdr->buf_len = le32_to_cpu(ev->buf_len);
5538 	hdr->status = le32_to_cpu(ev->status);
5539 	hdr->flags = le32_to_cpu(ev->flags);
5540 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
5541 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
5542 
5543 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
5544 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
5545 
5546 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
5547 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
5548 		return -EPROTO;
5549 	}
5550 
5551 	/* shift the sk_buff to point to `frame` */
5552 	/* shift the sk_buff so its data points at 'frame' */
5553 	skb_trim(skb, 0);			/* drop the whole payload */
5554 	skb_put(skb, frame - skb->data);	/* move tail up to frame */
5555 	skb_pull(skb, frame - skb->data);	/* move data up to frame */
5556 	skb_put(skb, hdr->buf_len);		/* expose the frame body */
5557 	return 0;
5558 }
5559 
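/* Complete a mgmt frame tx: look up and remove the msdu from the tx
 * mgmt idr, unmap its DMA buffer, translate the fw status into
 * mac80211 tx flags, and wake any waiters once the pending mgmt tx
 * count drops to zero.
 */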
5560 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
5561 				    u32 status)
5562 {
5563 	struct sk_buff *msdu;
5564 	struct ieee80211_tx_info *info;
5565 	struct ath12k_skb_cb *skb_cb;
5566 	int num_mgmt;
5567 
5568 	spin_lock_bh(&ar->txmgmt_idr_lock);
5569 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
5570 
5571 	if (!msdu) {
5572 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5573 			    desc_id);
5574 		spin_unlock_bh(&ar->txmgmt_idr_lock);
5575 		return -ENOENT;
5576 	}
5577 
5578 	idr_remove(&ar->txmgmt_idr, desc_id);
5579 	spin_unlock_bh(&ar->txmgmt_idr_lock);
5580 
5581 	skb_cb = ATH12K_SKB_CB(msdu);
5582 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5583 
5584 	info = IEEE80211_SKB_CB(msdu);
5585 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5586 		info->flags |= IEEE80211_TX_STAT_ACK;
5587 
5588 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
5589 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
5590 
5591 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
5592 
5593 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5594 
5595 	/* WARN if we receive this event without any pending mgmt tx */
5596 	WARN_ON_ONCE(num_mgmt < 0);
5598 
5599 	if (!num_mgmt)
5600 		wake_up(&ar->txmgmt_empty_waitq);
5601 
5602 	return 0;
5603 }
5604 
5605 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5606 					       struct sk_buff *skb,
5607 					       struct wmi_mgmt_tx_compl_event *param)
5608 {
5609 	const void **tb;
5610 	const struct wmi_mgmt_tx_compl_event *ev;
5611 	int ret;
5612 
5613 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5614 	if (IS_ERR(tb)) {
5615 		ret = PTR_ERR(tb);
5616 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5617 		return ret;
5618 	}
5619 
5620 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5621 	if (!ev) {
5622 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5623 		kfree(tb);
5624 		return -EPROTO;
5625 	}
5626 
5627 	param->pdev_id = ev->pdev_id;
5628 	param->desc_id = ev->desc_id;
5629 	param->status = ev->status;
5630 
5631 	kfree(tb);
5632 	return 0;
5633 }
5634 
5635 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5636 {
5637 	lockdep_assert_held(&ar->data_lock);
5638 
5639 	switch (ar->scan.state) {
5640 	case ATH12K_SCAN_IDLE:
5641 	case ATH12K_SCAN_RUNNING:
5642 	case ATH12K_SCAN_ABORTING:
5643 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5644 			    ath12k_scan_state_str(ar->scan.state),
5645 			    ar->scan.state);
5646 		break;
5647 	case ATH12K_SCAN_STARTING:
5648 		ar->scan.state = ATH12K_SCAN_RUNNING;
5649 
5650 		if (ar->scan.is_roc)
5651 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5652 
5653 		complete(&ar->scan.started);
5654 		break;
5655 	}
5656 }
5657 
5658 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5659 {
5660 	lockdep_assert_held(&ar->data_lock);
5661 
5662 	switch (ar->scan.state) {
5663 	case ATH12K_SCAN_IDLE:
5664 	case ATH12K_SCAN_RUNNING:
5665 	case ATH12K_SCAN_ABORTING:
5666 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5667 			    ath12k_scan_state_str(ar->scan.state),
5668 			    ar->scan.state);
5669 		break;
5670 	case ATH12K_SCAN_STARTING:
5671 		complete(&ar->scan.started);
5672 		__ath12k_mac_scan_finish(ar);
5673 		break;
5674 	}
5675 }
5676 
5677 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5678 {
5679 	lockdep_assert_held(&ar->data_lock);
5680 
5681 	switch (ar->scan.state) {
5682 	case ATH12K_SCAN_IDLE:
5683 	case ATH12K_SCAN_STARTING:
5684 		/* One suspected reason a scan can complete while starting is
5685 		 * that firmware fails to deliver all scan events to the host,
5686 		 * e.g. when the transport pipe is full. This has been observed
5687 		 * with spectral scan phyerr events starving the wmi transport
5688 		 * pipe. In such a case the "scan completed" event should be
5689 		 * (and is) ignored by the host as it may just be the firmware's
5690 		 * scan state machine recovering.
5691 		 */
5692 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5693 			    ath12k_scan_state_str(ar->scan.state),
5694 			    ar->scan.state);
5695 		break;
5696 	case ATH12K_SCAN_RUNNING:
5697 	case ATH12K_SCAN_ABORTING:
5698 		__ath12k_mac_scan_finish(ar);
5699 		break;
5700 	}
5701 }
5702 
5703 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5704 {
5705 	lockdep_assert_held(&ar->data_lock);
5706 
5707 	switch (ar->scan.state) {
5708 	case ATH12K_SCAN_IDLE:
5709 	case ATH12K_SCAN_STARTING:
5710 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5711 			    ath12k_scan_state_str(ar->scan.state),
5712 			    ar->scan.state);
5713 		break;
5714 	case ATH12K_SCAN_RUNNING:
5715 	case ATH12K_SCAN_ABORTING:
5716 		ar->scan_channel = NULL;
5717 		break;
5718 	}
5719 }
5720 
5721 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5722 {
5723 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5724 
5725 	lockdep_assert_held(&ar->data_lock);
5726 
5727 	switch (ar->scan.state) {
5728 	case ATH12K_SCAN_IDLE:
5729 	case ATH12K_SCAN_STARTING:
5730 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5731 			    ath12k_scan_state_str(ar->scan.state),
5732 			    ar->scan.state);
5733 		break;
5734 	case ATH12K_SCAN_RUNNING:
5735 	case ATH12K_SCAN_ABORTING:
5736 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5737 
5738 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5739 			complete(&ar->scan.on_channel);
5740 
5741 		break;
5742 	}
5743 }
5744 
5745 static const char *
5746 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5747 			       enum wmi_scan_completion_reason reason)
5748 {
5749 	switch (type) {
5750 	case WMI_SCAN_EVENT_STARTED:
5751 		return "started";
5752 	case WMI_SCAN_EVENT_COMPLETED:
5753 		switch (reason) {
5754 		case WMI_SCAN_REASON_COMPLETED:
5755 			return "completed";
5756 		case WMI_SCAN_REASON_CANCELLED:
5757 			return "completed [cancelled]";
5758 		case WMI_SCAN_REASON_PREEMPTED:
5759 			return "completed [preempted]";
5760 		case WMI_SCAN_REASON_TIMEDOUT:
5761 			return "completed [timedout]";
5762 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5763 			return "completed [internal err]";
5764 		case WMI_SCAN_REASON_MAX:
5765 			break;
5766 		}
5767 		return "completed [unknown]";
5768 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5769 		return "bss channel";
5770 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5771 		return "foreign channel";
5772 	case WMI_SCAN_EVENT_DEQUEUED:
5773 		return "dequeued";
5774 	case WMI_SCAN_EVENT_PREEMPTED:
5775 		return "preempted";
5776 	case WMI_SCAN_EVENT_START_FAILED:
5777 		return "start failed";
5778 	case WMI_SCAN_EVENT_RESTARTED:
5779 		return "restarted";
5780 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5781 		return "foreign channel exit";
5782 	default:
5783 		return "unknown";
5784 	}
5785 }
5786 
5787 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5788 			       struct wmi_scan_event *scan_evt_param)
5789 {
5790 	const void **tb;
5791 	const struct wmi_scan_event *ev;
5792 	int ret;
5793 
5794 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5795 	if (IS_ERR(tb)) {
5796 		ret = PTR_ERR(tb);
5797 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5798 		return ret;
5799 	}
5800 
5801 	ev = tb[WMI_TAG_SCAN_EVENT];
5802 	if (!ev) {
5803 		ath12k_warn(ab, "failed to fetch scan ev");
5804 		kfree(tb);
5805 		return -EPROTO;
5806 	}
5807 
5808 	scan_evt_param->event_type = ev->event_type;
5809 	scan_evt_param->reason = ev->reason;
5810 	scan_evt_param->channel_freq = ev->channel_freq;
5811 	scan_evt_param->scan_req_id = ev->scan_req_id;
5812 	scan_evt_param->scan_id = ev->scan_id;
5813 	scan_evt_param->vdev_id = ev->vdev_id;
5814 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5815 
5816 	kfree(tb);
5817 	return 0;
5818 }
5819 
5820 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5821 					   struct wmi_peer_sta_kickout_arg *arg)
5822 {
5823 	const void **tb;
5824 	const struct wmi_peer_sta_kickout_event *ev;
5825 	int ret;
5826 
5827 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5828 	if (IS_ERR(tb)) {
5829 		ret = PTR_ERR(tb);
5830 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5831 		return ret;
5832 	}
5833 
5834 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5835 	if (!ev) {
5836 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5837 		kfree(tb);
5838 		return -EPROTO;
5839 	}
5840 
5841 	arg->mac_addr = ev->peer_macaddr.addr;
5842 
5843 	kfree(tb);
5844 	return 0;
5845 }
5846 
5847 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5848 			       struct wmi_roam_event *roam_ev)
5849 {
5850 	const void **tb;
5851 	const struct wmi_roam_event *ev;
5852 	int ret;
5853 
5854 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5855 	if (IS_ERR(tb)) {
5856 		ret = PTR_ERR(tb);
5857 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5858 		return ret;
5859 	}
5860 
5861 	ev = tb[WMI_TAG_ROAM_EVENT];
5862 	if (!ev) {
5863 		ath12k_warn(ab, "failed to fetch roam ev");
5864 		kfree(tb);
5865 		return -EPROTO;
5866 	}
5867 
5868 	roam_ev->vdev_id = ev->vdev_id;
5869 	roam_ev->reason = ev->reason;
5870 	roam_ev->rssi = ev->rssi;
5871 
5872 	kfree(tb);
5873 	return 0;
5874 }
5875 
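/* Map a channel center frequency to a flat index across all bands
 * advertised to mac80211; if the frequency is not found, the running
 * index (i.e. the total channel count) is returned.
 */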
5876 static int freq_to_idx(struct ath12k *ar, int freq)
5877 {
5878 	struct ieee80211_supported_band *sband;
5879 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5880 	int band, ch, idx = 0;
5881 
5882 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5883 		if (!ar->mac.sbands[band].channels)
5884 			continue;
5885 
5886 		sband = hw->wiphy->bands[band];
5887 		if (!sband)
5888 			continue;
5889 
5890 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5891 			if (sband->channels[ch].center_freq == freq)
5892 				goto exit;
5893 	}
5894 
5895 exit:
5896 	return idx;
5897 }
5898 
5899 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5900 				    struct wmi_chan_info_event *ch_info_ev)
5901 {
5902 	const void **tb;
5903 	const struct wmi_chan_info_event *ev;
5904 	int ret;
5905 
5906 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5907 	if (IS_ERR(tb)) {
5908 		ret = PTR_ERR(tb);
5909 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5910 		return ret;
5911 	}
5912 
5913 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5914 	if (!ev) {
5915 		ath12k_warn(ab, "failed to fetch chan info ev");
5916 		kfree(tb);
5917 		return -EPROTO;
5918 	}
5919 
5920 	ch_info_ev->err_code = ev->err_code;
5921 	ch_info_ev->freq = ev->freq;
5922 	ch_info_ev->cmd_flags = ev->cmd_flags;
5923 	ch_info_ev->noise_floor = ev->noise_floor;
5924 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5925 	ch_info_ev->cycle_count = ev->cycle_count;
5926 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5927 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5928 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5929 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5930 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5931 	ch_info_ev->vdev_id = ev->vdev_id;
5932 
5933 	kfree(tb);
5934 	return 0;
5935 }
5936 
5937 static int
5938 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5939 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5940 {
5941 	const void **tb;
5942 	const struct wmi_pdev_bss_chan_info_event *ev;
5943 	int ret;
5944 
5945 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5946 	if (IS_ERR(tb)) {
5947 		ret = PTR_ERR(tb);
5948 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5949 		return ret;
5950 	}
5951 
5952 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5953 	if (!ev) {
5954 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5955 		kfree(tb);
5956 		return -EPROTO;
5957 	}
5958 
5959 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5960 	bss_ch_info_ev->freq = ev->freq;
5961 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5962 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5963 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5964 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5965 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5966 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5967 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5968 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5969 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5970 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5971 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5972 
5973 	kfree(tb);
5974 	return 0;
5975 }
5976 
5977 static int
5978 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5979 				      struct wmi_vdev_install_key_complete_arg *arg)
5980 {
5981 	const void **tb;
5982 	const struct wmi_vdev_install_key_compl_event *ev;
5983 	int ret;
5984 
5985 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5986 	if (IS_ERR(tb)) {
5987 		ret = PTR_ERR(tb);
5988 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5989 		return ret;
5990 	}
5991 
5992 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5993 	if (!ev) {
5994 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5995 		kfree(tb);
5996 		return -EPROTO;
5997 	}
5998 
5999 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6000 	arg->macaddr = ev->peer_macaddr.addr;
6001 	arg->key_idx = le32_to_cpu(ev->key_idx);
6002 	arg->key_flags = le32_to_cpu(ev->key_flags);
6003 	arg->status = le32_to_cpu(ev->status);
6004 
6005 	kfree(tb);
6006 	return 0;
6007 }
6008 
6009 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6010 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6011 {
6012 	const void **tb;
6013 	const struct wmi_peer_assoc_conf_event *ev;
6014 	int ret;
6015 
6016 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6017 	if (IS_ERR(tb)) {
6018 		ret = PTR_ERR(tb);
6019 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6020 		return ret;
6021 	}
6022 
6023 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6024 	if (!ev) {
6025 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6026 		kfree(tb);
6027 		return -EPROTO;
6028 	}
6029 
6030 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6031 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6032 
6033 	kfree(tb);
6034 	return 0;
6035 }
6036 
6037 static int
6038 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6039 			 const struct wmi_pdev_temperature_event *ev)
6040 {
6041 	const void **tb;
6042 	int ret;
6043 
6044 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6045 	if (IS_ERR(tb)) {
6046 		ret = PTR_ERR(tb);
6047 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6048 		return ret;
6049 	}
6050 
6051 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6052 	if (!ev) {
6053 		ath12k_warn(ab, "failed to fetch pdev temp ev");
6054 		kfree(tb);
6055 		return -EPROTO;
6056 	}
6057 
6058 	kfree(tb);
6059 	return 0;
6060 }
6061 
6062 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6063 {
6064 	/* try to send pending beacons first. they take priority */
6065 	wake_up(&ab->wmi_ab.tx_credits_wq);
6066 }
6067 
6068 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6069 {
6070 	const struct wmi_11d_new_cc_event *ev;
6071 	struct ath12k *ar;
6072 	struct ath12k_pdev *pdev;
6073 	const void **tb;
6074 	int ret, i;
6075 
6076 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6077 	if (IS_ERR(tb)) {
6078 		ret = PTR_ERR(tb);
6079 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6080 		return ret;
6081 	}
6082 
6083 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6084 	if (!ev) {
6085 		kfree(tb);
6086 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6087 		return -EPROTO;
6088 	}
6089 
6090 	spin_lock_bh(&ab->base_lock);
6091 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6092 	spin_unlock_bh(&ab->base_lock);
6093 
6094 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6095 		   ab->new_alpha2[0],
6096 		   ab->new_alpha2[1]);
6097 
6098 	kfree(tb);
6099 
6100 	for (i = 0; i < ab->num_radios; i++) {
6101 		pdev = &ab->pdevs[i];
6102 		ar = pdev->ar;
6103 		ar->state_11d = ATH12K_11D_IDLE;
6104 		complete(&ar->completed_11d_scan);
6105 	}
6106 
6107 	queue_work(ab->workqueue, &ab->update_11d_work);
6108 
6109 	return 0;
6110 }
6111 
6112 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6113 				       struct sk_buff *skb)
6114 {
6115 	dev_kfree_skb(skb);
6116 }
6117 
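/* Both "00" (the world regdomain) and "na" are treated as
 * world/default alpha2 codes.
 */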
6118 static bool ath12k_reg_is_world_alpha(char *alpha)
6119 {
6120 	if (alpha[0] == '0' && alpha[1] == '0')
6121 		return true;
6122 
6123 	if (alpha[0] == 'n' && alpha[1] == 'a')
6124 		return true;
6125 
6126 	return false;
6127 }
6128 
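/* Handle a regulatory channel list event: build a new regdomain from
 * the event data and either queue a regd update (once mac is
 * registered) or store it as the default regd to be applied during
 * mac registration.
 */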
6129 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6130 {
6131 	struct ath12k_reg_info *reg_info = NULL;
6132 	struct ieee80211_regdomain *regd = NULL;
6133 	bool intersect = false;
6134 	int ret = 0, pdev_idx, i, j;
6135 	struct ath12k *ar;
6136 
6137 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6138 	if (!reg_info) {
6139 		ret = -ENOMEM;
6140 		goto fallback;
6141 	}
6142 
6143 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6144 
6145 	if (ret) {
6146 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6147 		goto fallback;
6148 	}
6149 
6150 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
6151 		/* If setting the requested country fails, the fw retains
6152 		 * the current regd. Print a failure message and return
6153 		 * from here.
6154 		 */
6155 		ath12k_warn(ab, "Failed to set the requested country regulatory setting\n");
6156 		goto mem_free;
6157 	}
6158 
6159 	pdev_idx = reg_info->phy_id;
6160 
6161 	if (pdev_idx >= ab->num_radios) {
6162 		/* Process the event for phy0 only if single_pdev_only
6163 		 * is true. If pdev_idx is valid but not 0, discard the
6164 		 * event. Otherwise, it goes to fallback.
6165 		 */
6166 		if (ab->hw_params->single_pdev_only &&
6167 		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
6168 			goto mem_free;
6169 		else
6170 			goto fallback;
6171 	}
6172 
6173 	/* Avoid multiple overwrites of the default regd during core
6174 	 * stop-start after mac registration.
6175 	 */
6176 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
6177 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
6178 		    reg_info->alpha2, 2))
6179 		goto mem_free;
6180 
6181 	/* Intersect new rules with default regd if a new country setting was
6182 	 * requested, i.e a default regd was already set during initialization
6183 	 * requested, i.e. a default regd was already set during initialization
6184 	 * and the regd coming from this event has valid country info.
6185 	if (ab->default_regd[pdev_idx] &&
6186 	    !ath12k_reg_is_world_alpha((char *)
6187 		ab->default_regd[pdev_idx]->alpha2) &&
6188 	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
6189 		intersect = true;
6190 
6191 	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
6192 	if (!regd) {
6193 		ath12k_warn(ab, "failed to build regd from reg_info\n");
6194 		goto fallback;
6195 	}
6196 
6197 	spin_lock(&ab->base_lock);
6198 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
6199 		/* Once mac is registered, ar is valid and all CC events
6200 		 * from fw are currently considered to be received due to
6201 		 * user requests.
6202 		 * Free the previously built regd before assigning the
6203 		 * newly generated regd to ar. kfree itself handles the
6204 		 * NULL pointer case.
6205 		 */
6206 		ar = ab->pdevs[pdev_idx].ar;
6207 		kfree(ab->new_regd[pdev_idx]);
6208 		ab->new_regd[pdev_idx] = regd;
6209 		queue_work(ab->workqueue, &ar->regd_update_work);
6210 	} else {
6211 		/* Multiple events for the same *ar are not expected. But we
6212 		 * can still clear any previously stored default_regd if we
6213 		 * receive this event for the same radio by mistake. kfree
6214 		 * itself handles the NULL pointer case.
6215 		 */
6216 		kfree(ab->default_regd[pdev_idx]);
6217 		/* This regd would be applied during mac registration */
6218 		ab->default_regd[pdev_idx] = regd;
6219 	}
6220 	ab->dfs_region = reg_info->dfs_region;
6221 	spin_unlock(&ab->base_lock);
6222 
6223 	goto mem_free;
6224 
6225 fallback:
6226 	/* Fall back to the older regd (by sending the previous country
6227 	 * setting again) if fw has succeeded and we failed to process here.
6228 	 * The regdomain should be uniform across driver and fw. Since the
6229 	 * fw has processed the command and sent a success status, we expect
6230 	 * this function to succeed as well. If it doesn't, CTRY needs to be
6231 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6232 	 */
6233 	/* TODO: This is rare, but should still be handled */
6234 	WARN_ON(1);
6235 mem_free:
6236 	if (reg_info) {
6237 		kfree(reg_info->reg_rules_2g_ptr);
6238 		kfree(reg_info->reg_rules_5g_ptr);
6239 		if (reg_info->is_ext_reg_event) {
6240 			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
6241 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
6242 
6243 			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
6244 				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
6245 					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
6246 		}
6247 		kfree(reg_info);
6248 	}
6249 	return ret;
6250 }
6251 
6252 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6253 				const void *ptr, void *data)
6254 {
6255 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6256 	struct wmi_ready_event fixed_param;
6257 	struct ath12k_wmi_mac_addr_params *addr_list;
6258 	struct ath12k_pdev *pdev;
6259 	u32 num_mac_addr;
6260 	int i;
6261 
6262 	switch (tag) {
6263 	case WMI_TAG_READY_EVENT:
6264 		memset(&fixed_param, 0, sizeof(fixed_param));
6265 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6266 		       min_t(u16, sizeof(fixed_param), len));
6267 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6268 		rdy_parse->num_extra_mac_addr =
6269 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6270 
6271 		ether_addr_copy(ab->mac_addr,
6272 				fixed_param.ready_event_min.mac_addr.addr);
6273 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6274 		ab->wmi_ready = true;
6275 		break;
6276 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6277 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6278 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6279 
6280 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6281 			break;
6282 
6283 		for (i = 0; i < ab->num_radios; i++) {
6284 			pdev = &ab->pdevs[i];
6285 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6286 		}
6287 		ab->pdevs_macaddr_valid = true;
6288 		break;
6289 	default:
6290 		break;
6291 	}
6292 
6293 	return 0;
6294 }
6295 
6296 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6297 {
6298 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6299 	int ret;
6300 
6301 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6302 				  ath12k_wmi_rdy_parse, &rdy_parse);
6303 	if (ret) {
6304 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6305 		return ret;
6306 	}
6307 
6308 	complete(&ab->wmi_ab.unified_ready);
6309 	return 0;
6310 }
6311 
6312 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6313 {
6314 	struct wmi_peer_delete_resp_event peer_del_resp;
6315 	struct ath12k *ar;
6316 
6317 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6318 		ath12k_warn(ab, "failed to extract peer delete resp");
6319 		return;
6320 	}
6321 
6322 	rcu_read_lock();
6323 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6324 	if (!ar) {
6325 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6326 			    le32_to_cpu(peer_del_resp.vdev_id));
6327 		rcu_read_unlock();
6328 		return;
6329 	}
6330 
6331 	complete(&ar->peer_delete_done);
6332 	rcu_read_unlock();
6333 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6334 		   le32_to_cpu(peer_del_resp.vdev_id), peer_del_resp.peer_macaddr.addr);
6335 }
6336 
6337 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6338 					  struct sk_buff *skb)
6339 {
6340 	struct ath12k *ar;
6341 	u32 vdev_id = 0;
6342 
6343 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6344 		ath12k_warn(ab, "failed to extract vdev delete resp");
6345 		return;
6346 	}
6347 
6348 	rcu_read_lock();
6349 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6350 	if (!ar) {
6351 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6352 			    vdev_id);
6353 		rcu_read_unlock();
6354 		return;
6355 	}
6356 
6357 	complete(&ar->vdev_delete_done);
6358 
6359 	rcu_read_unlock();
6360 
6361 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6362 		   vdev_id);
6363 }
6364 
6365 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6366 {
6367 	switch (vdev_resp_status) {
6368 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6369 		return "invalid vdev id";
6370 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6371 		return "not supported";
6372 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6373 		return "dfs violation";
6374 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6375 		return "invalid regdomain";
6376 	default:
6377 		return "unknown";
6378 	}
6379 }
6380 
6381 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6382 {
6383 	struct wmi_vdev_start_resp_event vdev_start_resp;
6384 	struct ath12k *ar;
6385 	u32 status;
6386 
6387 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6388 		ath12k_warn(ab, "failed to extract vdev start resp");
6389 		return;
6390 	}
6391 
6392 	rcu_read_lock();
6393 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6394 	if (!ar) {
6395 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6396 			    le32_to_cpu(vdev_start_resp.vdev_id));
6397 		rcu_read_unlock();
6398 		return;
6399 	}
6400 
6401 	ar->last_wmi_vdev_start_status = 0;
6402 
6403 	status = le32_to_cpu(vdev_start_resp.status);
6404 
6405 	if (WARN_ON_ONCE(status)) {
6406 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6407 			    status, ath12k_wmi_vdev_resp_print(status));
6408 		ar->last_wmi_vdev_start_status = status;
6409 	}
6410 
6411 	complete(&ar->vdev_setup_done);
6412 
6413 	rcu_read_unlock();
6414 
6415 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6416 		   le32_to_cpu(vdev_start_resp.vdev_id));
6417 }
6418 
6419 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6420 {
6421 	u32 vdev_id, tx_status;
6422 
6423 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6424 		ath12k_warn(ab, "failed to extract bcn tx status");
6425 		return;
6426 	}
6427 }
6428 
6429 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6430 {
6431 	struct ath12k *ar;
6432 	u32 vdev_id = 0;
6433 
6434 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6435 		ath12k_warn(ab, "failed to extract vdev stopped event");
6436 		return;
6437 	}
6438 
6439 	rcu_read_lock();
6440 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6441 	if (!ar) {
6442 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6443 			    vdev_id);
6444 		rcu_read_unlock();
6445 		return;
6446 	}
6447 
6448 	complete(&ar->vdev_setup_done);
6449 
6450 	rcu_read_unlock();
6451 
6452 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6453 }
6454 
6455 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6456 {
6457 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6458 	struct ath12k *ar;
6459 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6460 	struct ieee80211_hdr *hdr;
6461 	u16 fc;
6462 	struct ieee80211_supported_band *sband;
6463 
6464 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6465 		ath12k_warn(ab, "failed to extract mgmt rx event");
6466 		dev_kfree_skb(skb);
6467 		return;
6468 	}
6469 
6470 	memset(status, 0, sizeof(*status));
6471 
6472 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
6473 		   rx_ev.status);
6474 
6475 	rcu_read_lock();
6476 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6477 
6478 	if (!ar) {
6479 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6480 			    rx_ev.pdev_id);
6481 		dev_kfree_skb(skb);
6482 		goto exit;
6483 	}
6484 
6485 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
6486 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6487 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
6488 			     WMI_RX_STATUS_ERR_CRC))) {
6489 		dev_kfree_skb(skb);
6490 		goto exit;
6491 	}
6492 
6493 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6494 		status->flag |= RX_FLAG_MMIC_ERROR;
6495 
6496 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
6497 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
6498 		status->band = NL80211_BAND_6GHZ;
6499 		status->freq = rx_ev.chan_freq;
6500 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6501 		status->band = NL80211_BAND_2GHZ;
6502 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
6503 		status->band = NL80211_BAND_5GHZ;
6504 	} else {
6505 		/* Shouldn't happen unless list of advertised channels to
6506 		 * mac80211 has been changed.
6507 		 */
6508 		WARN_ON_ONCE(1);
6509 		dev_kfree_skb(skb);
6510 		goto exit;
6511 	}
6512 
6513 	if (rx_ev.phy_mode == MODE_11B &&
6514 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6515 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6516 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6517 
6518 	sband = &ar->mac.sbands[status->band];
6519 
6520 	if (status->band != NL80211_BAND_6GHZ)
6521 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6522 							      status->band);
6523 
6524 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
6525 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6526 
6527 	hdr = (struct ieee80211_hdr *)skb->data;
6528 	fc = le16_to_cpu(hdr->frame_control);
6529 
6530 	/* Firmware is guaranteed to report all essential management frames
6531 	 * via WMI while it can deliver some extra via HTT. Since there can
6532 	 * be duplicates, split the reporting wrt monitor/sniffing.
6533 	 */
6534 	status->flag |= RX_FLAG_SKIP_MONITOR;
6535 
6536 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
6537 	 * including group privacy action frames.
6538 	 */
6539 	if (ieee80211_has_protected(hdr->frame_control)) {
6540 		status->flag |= RX_FLAG_DECRYPTED;
6541 
6542 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
6543 			status->flag |= RX_FLAG_IV_STRIPPED |
6544 					RX_FLAG_MMIC_STRIPPED;
6545 			hdr->frame_control = __cpu_to_le16(fc &
6546 					     ~IEEE80211_FCTL_PROTECTED);
6547 		}
6548 	}
6549 
6550 	if (ieee80211_is_beacon(hdr->frame_control))
6551 		ath12k_mac_handle_beacon(ar, skb);
6552 
6553 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6554 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
6555 		   skb, skb->len,
6556 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6557 
6558 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6559 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6560 		   status->freq, status->band, status->signal,
6561 		   status->rate_idx);
6562 
6563 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
6564 
6565 exit:
6566 	rcu_read_unlock();
6567 }
6568 
6569 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6570 {
6571 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6572 	struct ath12k *ar;
6573 
6574 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6575 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
6576 		return;
6577 	}
6578 
6579 	rcu_read_lock();
6580 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6581 	if (!ar) {
6582 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6583 			    tx_compl_param.pdev_id);
6584 		goto exit;
6585 	}
6586 
6587 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6588 				 le32_to_cpu(tx_compl_param.status));
6589 
6590 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6591 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6592 		   le32_to_cpu(tx_compl_param.pdev_id), le32_to_cpu(tx_compl_param.desc_id),
6593 		   le32_to_cpu(tx_compl_param.status));
6594 
6595 exit:
6596 	rcu_read_unlock();
6597 }
6598 
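/* Walk the active pdevs to find the ar whose scan is in the given
 * state and whose scanning vdev id matches; returns NULL if none.
 */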
6599 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
6600 						  u32 vdev_id,
6601 						  enum ath12k_scan_state state)
6602 {
6603 	int i;
6604 	struct ath12k_pdev *pdev;
6605 	struct ath12k *ar;
6606 
6607 	for (i = 0; i < ab->num_radios; i++) {
6608 		pdev = rcu_dereference(ab->pdevs_active[i]);
6609 		if (pdev && pdev->ar) {
6610 			ar = pdev->ar;
6611 
6612 			spin_lock_bh(&ar->data_lock);
6613 			if (ar->scan.state == state &&
6614 			    ar->scan.arvif &&
6615 			    ar->scan.arvif->vdev_id == vdev_id) {
6616 				spin_unlock_bh(&ar->data_lock);
6617 				return ar;
6618 			}
6619 			spin_unlock_bh(&ar->data_lock);
6620 		}
6621 	}
6622 	return NULL;
6623 }
6624 
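/* Drive the host scan state machine from firmware scan events; all state
 * transitions below happen under ar->data_lock.
 */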
6625 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
6626 {
6627 	struct ath12k *ar;
6628 	struct wmi_scan_event scan_ev = {0};
6629 
6630 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6631 		ath12k_warn(ab, "failed to extract scan event");
6632 		return;
6633 	}
6634 
6635 	rcu_read_lock();
6636 
6637 	/* In case the scan was cancelled, e.g. during interface teardown,
6638 	 * the interface will not be found among the active interfaces.
6639 	 * In such scenarios, instead iterate over the active pdevs and
6640 	 * look for an 'ar' whose scan is ABORTING and whose aborting
6641 	 * scan's vdev id matches this event's vdev id.
6642 	 */
6643 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
6644 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
6645 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6646 						 ATH12K_SCAN_ABORTING);
6647 		if (!ar)
6648 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6649 							 ATH12K_SCAN_RUNNING);
6650 	} else {
6651 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
6652 	}
6653 
6654 	if (!ar) {
6655 		ath12k_warn(ab, "Received scan event for unknown vdev");
6656 		rcu_read_unlock();
6657 		return;
6658 	}
6659 
6660 	spin_lock_bh(&ar->data_lock);
6661 
6662 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6663 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
6664 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
6665 						  le32_to_cpu(scan_ev.reason)),
6666 		   le32_to_cpu(scan_ev.event_type),
6667 		   le32_to_cpu(scan_ev.reason),
6668 		   le32_to_cpu(scan_ev.channel_freq),
6669 		   le32_to_cpu(scan_ev.scan_req_id),
6670 		   le32_to_cpu(scan_ev.scan_id),
6671 		   le32_to_cpu(scan_ev.vdev_id),
6672 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
6673 
6674 	switch (le32_to_cpu(scan_ev.event_type)) {
6675 	case WMI_SCAN_EVENT_STARTED:
6676 		ath12k_wmi_event_scan_started(ar);
6677 		break;
6678 	case WMI_SCAN_EVENT_COMPLETED:
6679 		ath12k_wmi_event_scan_completed(ar);
6680 		break;
6681 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6682 		ath12k_wmi_event_scan_bss_chan(ar);
6683 		break;
6684 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6685 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
6686 		break;
6687 	case WMI_SCAN_EVENT_START_FAILED:
6688 		ath12k_warn(ab, "received scan start failure event\n");
6689 		ath12k_wmi_event_scan_start_failed(ar);
6690 		break;
6691 	case WMI_SCAN_EVENT_DEQUEUED:
6692 		__ath12k_mac_scan_finish(ar);
6693 		break;
6694 	case WMI_SCAN_EVENT_PREEMPTED:
6695 	case WMI_SCAN_EVENT_RESTARTED:
6696 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6697 	default:
6698 		break;
6699 	}
6700 
6701 	spin_unlock_bh(&ar->data_lock);
6702 
6703 	rcu_read_unlock();
6704 }
6705 
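/* Firmware reports a kickout when it gives up on a peer; forward it to
 * mac80211 as a low-ack report so the stack can react.
 */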
6706 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6707 {
6708 	struct wmi_peer_sta_kickout_arg arg = {};
6709 	struct ieee80211_sta *sta;
6710 	struct ath12k_peer *peer;
6711 	struct ath12k *ar;
6712 
6713 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6714 		ath12k_warn(ab, "failed to extract peer sta kickout event");
6715 		return;
6716 	}
6717 
6718 	rcu_read_lock();
6719 
6720 	spin_lock_bh(&ab->base_lock);
6721 
6722 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6723 
6724 	if (!peer) {
6725 		ath12k_warn(ab, "peer not found %pM\n",
6726 			    arg.mac_addr);
6727 		goto exit;
6728 	}
6729 
6730 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6731 	if (!ar) {
6732 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6733 			    peer->vdev_id);
6734 		goto exit;
6735 	}
6736 
6737 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6738 					   arg.mac_addr, NULL);
6739 	if (!sta) {
6740 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6741 			    arg.mac_addr);
6742 		goto exit;
6743 	}
6744 
6745 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6746 		   arg.mac_addr);
6747 
6748 	ieee80211_report_low_ack(sta, 10);
6749 
6750 exit:
6751 	spin_unlock_bh(&ab->base_lock);
6752 	rcu_read_unlock();
6753 }
6754 
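/* Only WMI_ROAM_REASON_BEACON_MISS is handled here; the remaining roam
 * reasons are logged and ignored.
 */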
6755 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6756 {
6757 	struct wmi_roam_event roam_ev = {};
6758 	struct ath12k *ar;
6759 	u32 vdev_id;
6760 	u8 roam_reason;
6761 
6762 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6763 		ath12k_warn(ab, "failed to extract roam event");
6764 		return;
6765 	}
6766 
6767 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
6768 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6769 				   WMI_ROAM_REASON_MASK);
6770 
6771 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6772 		   "wmi roam event vdev %u reason %d rssi %d\n",
6773 		   vdev_id, roam_reason, roam_ev.rssi);
6774 
6775 	rcu_read_lock();
6776 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6777 	if (!ar) {
6778 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6779 		rcu_read_unlock();
6780 		return;
6781 	}
6782 
6783 	if (roam_reason >= WMI_ROAM_REASON_MAX)
6784 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6785 			    roam_reason, vdev_id);
6786 
6787 	switch (roam_reason) {
6788 	case WMI_ROAM_REASON_BEACON_MISS:
6789 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
6790 		break;
6791 	case WMI_ROAM_REASON_BETTER_AP:
6792 	case WMI_ROAM_REASON_LOW_RSSI:
6793 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6794 	case WMI_ROAM_REASON_HO_FAILED:
6795 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6796 			    roam_reason, vdev_id);
6797 		break;
6798 	}
6799 
6800 	rcu_read_unlock();
6801 }
6802 
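/* Channel info events feed the per-channel survey: a START_RESP snapshot
 * resets the survey entry for the scanned frequency, while an END_RESP
 * only marks the end of the report.
 */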
6803 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6804 {
6805 	struct wmi_chan_info_event ch_info_ev = {0};
6806 	struct ath12k *ar;
6807 	struct survey_info *survey;
6808 	int idx;
6809 	/* HW channel counters frequency value in hertz */
6810 	u32 cc_freq_hz = ab->cc_freq_hz;
6811 
6812 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6813 		ath12k_warn(ab, "failed to extract chan info event");
6814 		return;
6815 	}
6816 
6817 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6818 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6819 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6820 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6821 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6822 		   ch_info_ev.mac_clk_mhz);
6823 
6824 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6825 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6826 		return;
6827 	}
6828 
6829 	rcu_read_lock();
6830 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6831 	if (!ar) {
6832 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6833 			    ch_info_ev.vdev_id);
6834 		rcu_read_unlock();
6835 		return;
6836 	}
6837 	spin_lock_bh(&ar->data_lock);
6838 
6839 	switch (ar->scan.state) {
6840 	case ATH12K_SCAN_IDLE:
6841 	case ATH12K_SCAN_STARTING:
6842 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6843 		goto exit;
6844 	case ATH12K_SCAN_RUNNING:
6845 	case ATH12K_SCAN_ABORTING:
6846 		break;
6847 	}
6848 
6849 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6850 	if (idx >= ARRAY_SIZE(ar->survey)) {
6851 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6852 			    ch_info_ev.freq, idx);
6853 		goto exit;
6854 	}
6855 
6856 	/* If FW provides the MAC clock frequency in MHz, override the
6857 	 * initialized HW channel counters frequency value.
6858 	 */
6859 	if (ch_info_ev.mac_clk_mhz)
6860 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6861 
6862 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
6863 		survey = &ar->survey[idx];
6864 		memset(survey, 0, sizeof(*survey));
6865 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6866 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6867 				 SURVEY_INFO_TIME_BUSY;
6868 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6869 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6870 					    cc_freq_hz);
6871 	}
6872 exit:
6873 	spin_unlock_bh(&ar->data_lock);
6874 	rcu_read_unlock();
6875 }
6876 
6877 static void
6878 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6879 {
6880 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6881 	struct survey_info *survey;
6882 	struct ath12k *ar;
6883 	u32 cc_freq_hz = ab->cc_freq_hz;
6884 	u64 busy, total, tx, rx, rx_bss;
6885 	int idx;
6886 
6887 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6888 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6889 		return;
6890 	}
6891 
6892 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6893 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6894 
6895 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6896 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6897 
6898 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6899 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6900 
6901 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6902 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6903 
6904 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6905 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6906 
6907 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6908 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6909 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6910 		   bss_ch_info_ev.noise_floor, busy, total,
6911 		   tx, rx, rx_bss);
6912 
6913 	rcu_read_lock();
6914 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6915 
6916 	if (!ar) {
6917 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6918 			    bss_ch_info_ev.pdev_id);
6919 		rcu_read_unlock();
6920 		return;
6921 	}
6922 
6923 	spin_lock_bh(&ar->data_lock);
6924 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6925 	if (idx >= ARRAY_SIZE(ar->survey)) {
6926 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6927 			    bss_ch_info_ev.freq, idx);
6928 		goto exit;
6929 	}
6930 
6931 	survey = &ar->survey[idx];
6932 
6933 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6934 	survey->time      = div_u64(total, cc_freq_hz);
6935 	survey->time_busy = div_u64(busy, cc_freq_hz);
6936 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6937 	survey->time_tx   = div_u64(tx, cc_freq_hz);
6938 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6939 			     SURVEY_INFO_TIME |
6940 			     SURVEY_INFO_TIME_BUSY |
6941 			     SURVEY_INFO_TIME_RX |
6942 			     SURVEY_INFO_TIME_TX);
6943 exit:
6944 	spin_unlock_bh(&ar->data_lock);
6945 	complete(&ar->bss_survey_done);
6946 
6947 	rcu_read_unlock();
6948 }
6949 
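/* Completion of WMI_VDEV_INSTALL_KEY_CMDID: record the firmware status and
 * wake up the caller waiting on install_key_done.
 */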
6950 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6951 						struct sk_buff *skb)
6952 {
6953 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6954 	struct ath12k *ar;
6955 
6956 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6957 		ath12k_warn(ab, "failed to extract install key compl event");
6958 		return;
6959 	}
6960 
6961 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6962 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6963 		   install_key_compl.key_idx, install_key_compl.key_flags,
6964 		   install_key_compl.macaddr, install_key_compl.status);
6965 
6966 	rcu_read_lock();
6967 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6968 	if (!ar) {
6969 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6970 			    install_key_compl.vdev_id);
6971 		rcu_read_unlock();
6972 		return;
6973 	}
6974 
6975 	ar->install_key_status = 0;
6976 
6977 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6978 		ath12k_warn(ab, "install key failed for %pM status %d\n",
6979 			    install_key_compl.macaddr, install_key_compl.status);
6980 		ar->install_key_status = install_key_compl.status;
6981 	}
6982 
6983 	complete(&ar->install_key_done);
6984 	rcu_read_unlock();
6985 }
6986 
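/* Service bitmaps are segmented: each 32-bit word carries
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service bits, with service ids starting
 * at WMI_MAX_SERVICE for the service-available event and at
 * WMI_MAX_EXT_SERVICE for the ext2 uint32 array.
 */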
6987 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6988 					  u16 tag, u16 len,
6989 					  const void *ptr,
6990 					  void *data)
6991 {
6992 	const struct wmi_service_available_event *ev;
6993 	const __le32 *wmi_ext2_service_bitmap;
6994 	int i, j;
6995 	u16 expected_len;
6996 
6997 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6998 	if (len < expected_len) {
6999 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
7000 			    len, tag);
7001 		return -EINVAL;
7002 	}
7003 
7004 	switch (tag) {
7005 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
7006 		ev = (struct wmi_service_available_event *)ptr;
7007 		for (i = 0, j = WMI_MAX_SERVICE;
7008 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
7009 		     i++) {
7010 			do {
7011 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
7012 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7013 					set_bit(j, ab->wmi_ab.svc_map);
7014 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7015 		}
7016 
7017 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7018 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
7019 			   le32_to_cpu(ev->wmi_service_segment_bitmap[0]),
7020 			   le32_to_cpu(ev->wmi_service_segment_bitmap[1]),
7021 			   le32_to_cpu(ev->wmi_service_segment_bitmap[2]),
7022 			   le32_to_cpu(ev->wmi_service_segment_bitmap[3]));
7023 		break;
7024 	case WMI_TAG_ARRAY_UINT32:
7025 		wmi_ext2_service_bitmap = ptr;
7026 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
7027 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
7028 		     i++) {
7029 			do {
7030 				if (le32_to_cpu(wmi_ext2_service_bitmap[i]) &
7031 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7032 					set_bit(j, ab->wmi_ab.svc_map);
7033 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7034 		}
7035 
7036 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7037 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
7038 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7039 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7040 		break;
7041 	}
7042 	return 0;
7043 }
7044 
7045 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7046 {
7047 	int ret;
7048 
7049 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7050 				  ath12k_wmi_tlv_services_parser,
7051 				  NULL);
7052 	return ret;
7053 }
7054 
7055 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7056 {
7057 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7058 	struct ath12k *ar;
7059 
7060 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7061 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7062 		return;
7063 	}
7064 
7065 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7066 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
7067 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7068 
7069 	rcu_read_lock();
7070 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7071 
7072 	if (!ar) {
7073 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7074 			    peer_assoc_conf.vdev_id);
7075 		rcu_read_unlock();
7076 		return;
7077 	}
7078 
7079 	complete(&ar->peer_assoc_done);
7080 	rcu_read_unlock();
7081 }
7082 
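/* The dump helpers below render firmware stats into the debugfs fw_stats
 * buffer: each appends to 'buf' via scnprintf() and advances *length,
 * bounded by ATH12K_FW_STATS_BUF_SIZE.
 */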
7083 static void
7084 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
7085 			      struct ath12k_fw_stats *fw_stats,
7086 			      char *buf, u32 *length)
7087 {
7088 	const struct ath12k_fw_stats_vdev *vdev;
7089 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7090 	struct ath12k_link_vif *arvif;
7091 	u32 len = *length;
7092 	u8 *vif_macaddr;
7093 	int i;
7094 
7095 	len += scnprintf(buf + len, buf_len - len, "\n");
7096 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7097 			 "ath12k VDEV stats");
7098 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7099 			 "=================");
7100 
7101 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
7102 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
7103 		if (!arvif)
7104 			continue;
7105 		vif_macaddr = arvif->ahvif->vif->addr;
7106 
7107 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7108 				 "VDEV ID", vdev->vdev_id);
7109 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7110 				 "VDEV MAC address", vif_macaddr);
7111 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7112 				 "beacon snr", vdev->beacon_snr);
7113 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7114 				 "data snr", vdev->data_snr);
7115 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7116 				 "num rx frames", vdev->num_rx_frames);
7117 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7118 				 "num rts fail", vdev->num_rts_fail);
7119 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7120 				 "num rts success", vdev->num_rts_success);
7121 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7122 				 "num rx err", vdev->num_rx_err);
7123 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7124 				 "num rx discard", vdev->num_rx_discard);
7125 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7126 				 "num tx not acked", vdev->num_tx_not_acked);
7127 
7128 		for (i = 0; i < WLAN_MAX_AC; i++)
7129 			len += scnprintf(buf + len, buf_len - len,
7130 					"%25s [%02d] %u\n",
7131 					"num tx frames", i,
7132 					vdev->num_tx_frames[i]);
7133 
7134 		for (i = 0; i < WLAN_MAX_AC; i++)
7135 			len += scnprintf(buf + len, buf_len - len,
7136 					"%25s [%02d] %u\n",
7137 					"num tx frames retries", i,
7138 					vdev->num_tx_frames_retries[i]);
7139 
7140 		for (i = 0; i < WLAN_MAX_AC; i++)
7141 			len += scnprintf(buf + len, buf_len - len,
7142 					"%25s [%02d] %u\n",
7143 					"num tx frames failures", i,
7144 					vdev->num_tx_frames_failures[i]);
7145 
7146 		for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7147 			len += scnprintf(buf + len, buf_len - len,
7148 					"%25s [%02d] 0x%08x\n",
7149 					"tx rate history", i,
7150 					vdev->tx_rate_history[i]);
7151 		for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7152 			len += scnprintf(buf + len, buf_len - len,
7153 					"%25s [%02d] %u\n",
7154 					"beacon rssi history", i,
7155 					vdev->beacon_rssi_history[i]);
7156 
7157 		len += scnprintf(buf + len, buf_len - len, "\n");
7158 		*length = len;
7159 	}
7160 }
7161 
7162 static void
7163 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7164 			     struct ath12k_fw_stats *fw_stats,
7165 			     char *buf, u32 *length)
7166 {
7167 	const struct ath12k_fw_stats_bcn *bcn;
7168 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7169 	struct ath12k_link_vif *arvif;
7170 	u32 len = *length;
7171 	size_t num_bcn;
7172 
7173 	num_bcn = list_count_nodes(&fw_stats->bcn);
7174 
7175 	len += scnprintf(buf + len, buf_len - len, "\n");
7176 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7177 			 "ath12k Beacon stats", num_bcn);
7178 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7179 			 "===================");
7180 
7181 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7182 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7183 		if (!arvif)
7184 			continue;
7185 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7186 				 "VDEV ID", bcn->vdev_id);
7187 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7188 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7189 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7190 				 "================");
7191 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7192 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7193 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7194 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7195 
7196 		len += scnprintf(buf + len, buf_len - len, "\n");
7197 		*length = len;
7198 	}
7199 }
7200 
7201 static void
7202 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7203 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7204 {
7205 	u32 len = *length;
7206 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7207 
7208 	len += scnprintf(buf + len, buf_len - len, "\n");
7209 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7210 			"ath12k PDEV stats");
7211 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7212 			"=================");
7213 
7214 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7215 			"Channel noise floor", pdev->ch_noise_floor);
7216 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7217 			"Channel TX power", pdev->chan_tx_power);
7218 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7219 			"TX frame count", pdev->tx_frame_count);
7220 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7221 			"RX frame count", pdev->rx_frame_count);
7222 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7223 			"RX clear count", pdev->rx_clear_count);
7224 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7225 			"Cycle count", pdev->cycle_count);
7226 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7227 			"PHY error count", pdev->phy_err_count);
7228 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7229 			"soc drop count", fw_soc_drop_cnt);
7230 
7231 	*length = len;
7232 }
7233 
7234 static void
7235 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7236 				 char *buf, u32 *length)
7237 {
7238 	u32 len = *length;
7239 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7240 
7241 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7242 			 "ath12k PDEV TX stats");
7243 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7244 			 "====================");
7245 
7246 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7247 			 "HTT cookies queued", pdev->comp_queued);
7248 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7249 			 "HTT cookies disp.", pdev->comp_delivered);
7250 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7251 			 "MSDU queued", pdev->msdu_enqued);
7252 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7253 			 "MPDU queued", pdev->mpdu_enqued);
7254 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7255 			 "MSDUs dropped", pdev->wmm_drop);
7256 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7257 			 "Local enqued", pdev->local_enqued);
7258 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7259 			 "Local freed", pdev->local_freed);
7260 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7261 			 "HW queued", pdev->hw_queued);
7262 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7263 			 "PPDUs reaped", pdev->hw_reaped);
7264 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7265 			 "Num underruns", pdev->underrun);
7266 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7267 			 "PPDUs cleaned", pdev->tx_abort);
7268 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7269 			 "MPDUs requeued", pdev->mpdus_requed);
7270 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7271 			 "Excessive retries", pdev->tx_ko);
7272 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7273 			 "HW rate", pdev->data_rc);
7274 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7275 			 "Sched self triggers", pdev->self_triggers);
7276 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7277 			 "Dropped due to SW retries",
7278 			 pdev->sw_retry_failure);
7279 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7280 			 "Illegal rate phy errors",
7281 			 pdev->illgl_rate_phy_err);
7282 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7283 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7284 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7285 			 "TX timeout", pdev->pdev_tx_timeout);
7286 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7287 			 "PDEV resets", pdev->pdev_resets);
7288 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7289 			 "Stateless TIDs alloc failures",
7290 			 pdev->stateless_tid_alloc_failure);
7291 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7292 			 "PHY underrun", pdev->phy_underrun);
7293 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7294 			 "MPDU is more than txop limit", pdev->txop_ovf);
7295 	*length = len;
7296 }
7297 
7298 static void
7299 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7300 				 char *buf, u32 *length)
7301 {
7302 	u32 len = *length;
7303 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7304 
7305 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7306 			 "ath12k PDEV RX stats");
7307 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7308 			 "====================");
7309 
7310 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7311 			 "Mid PPDU route change",
7312 			 pdev->mid_ppdu_route_change);
7313 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7314 			 "Tot. number of statuses", pdev->status_rcvd);
7315 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7316 			 "Extra frags on rings 0", pdev->r0_frags);
7317 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7318 			 "Extra frags on rings 1", pdev->r1_frags);
7319 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7320 			 "Extra frags on rings 2", pdev->r2_frags);
7321 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7322 			 "Extra frags on rings 3", pdev->r3_frags);
7323 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7324 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7325 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7326 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7327 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7328 			 "MSDUs delivered to stack", pdev->loc_msdus);
7329 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7330 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7331 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7332 			 "Oversized AMSUs", pdev->oversize_amsdu);
7333 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7334 			 "PHY errors", pdev->phy_errs);
7335 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7336 			 "PHY errors drops", pdev->phy_err_drop);
7337 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7338 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7339 	*length = len;
7340 }
7341 
7342 static void
7343 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7344 			      struct ath12k_fw_stats *fw_stats,
7345 			      char *buf, u32 *length)
7346 {
7347 	const struct ath12k_fw_stats_pdev *pdev;
7348 	u32 len = *length;
7349 
7350 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7351 					struct ath12k_fw_stats_pdev, list);
7352 	if (!pdev) {
7353 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7354 		return;
7355 	}
7356 
7357 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7358 					   ar->ab->fw_soc_drop_count);
7359 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7360 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7361 
7362 	*length = len;
7363 }
7364 
7365 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7366 			      struct ath12k_fw_stats *fw_stats,
7367 			      u32 stats_id, char *buf)
7368 {
7369 	u32 len = 0;
7370 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7371 
7372 	spin_lock_bh(&ar->data_lock);
7373 
7374 	switch (stats_id) {
7375 	case WMI_REQUEST_VDEV_STAT:
7376 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7377 		break;
7378 	case WMI_REQUEST_BCN_STAT:
7379 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7380 		break;
7381 	case WMI_REQUEST_PDEV_STAT:
7382 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7383 		break;
7384 	default:
7385 		break;
7386 	}
7387 
7388 	spin_unlock_bh(&ar->data_lock);
7389 
7390 	if (len >= buf_len)
7391 		buf[len - 1] = 0;
7392 	else
7393 		buf[len] = 0;
7394 
7395 	ath12k_fw_stats_reset(ar);
7396 }
7397 
7398 static void
7399 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7400 			   struct ath12k_fw_stats_vdev *dst)
7401 {
7402 	int i;
7403 
7404 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7405 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7406 	dst->data_snr = le32_to_cpu(src->data_snr);
7407 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7408 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7409 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7410 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7411 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7412 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7413 
7414 	for (i = 0; i < WLAN_MAX_AC; i++)
7415 		dst->num_tx_frames[i] =
7416 			le32_to_cpu(src->num_tx_frames[i]);
7417 
7418 	for (i = 0; i < WLAN_MAX_AC; i++)
7419 		dst->num_tx_frames_retries[i] =
7420 			le32_to_cpu(src->num_tx_frames_retries[i]);
7421 
7422 	for (i = 0; i < WLAN_MAX_AC; i++)
7423 		dst->num_tx_frames_failures[i] =
7424 			le32_to_cpu(src->num_tx_frames_failures[i]);
7425 
7426 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7427 		dst->tx_rate_history[i] =
7428 			le32_to_cpu(src->tx_rate_history[i]);
7429 
7430 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7431 		dst->beacon_rssi_history[i] =
7432 			le32_to_cpu(src->beacon_rssi_history[i]);
7433 }
7434 
7435 static void
7436 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7437 			  struct ath12k_fw_stats_bcn *dst)
7438 {
7439 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7440 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7441 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7442 }
7443 
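/* Note the mixed converters in the helpers below: a_sle32_to_cpu() is used
 * for signed little-endian firmware fields, __le32_to_cpu() for the
 * unsigned ones.
 */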
7444 static void
7445 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7446 				struct ath12k_fw_stats_pdev *dst)
7447 {
7448 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7449 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7450 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7451 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7452 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7453 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7454 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7455 }
7456 
7457 static void
7458 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
7459 			      struct ath12k_fw_stats_pdev *dst)
7460 {
7461 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
7462 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
7463 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
7464 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
7465 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
7466 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
7467 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
7468 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
7469 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
7470 	dst->underrun = a_sle32_to_cpu(src->underrun);
7471 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
7472 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
7473 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
7474 	dst->data_rc = __le32_to_cpu(src->data_rc);
7475 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
7476 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
7477 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
7478 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
7479 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
7480 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
7481 	dst->stateless_tid_alloc_failure =
7482 		__le32_to_cpu(src->stateless_tid_alloc_failure);
7483 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
7484 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
7485 }
7486 
7487 static void
7488 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
7489 			      struct ath12k_fw_stats_pdev *dst)
7490 {
7491 	dst->mid_ppdu_route_change =
7492 		a_sle32_to_cpu(src->mid_ppdu_route_change);
7493 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
7494 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
7495 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
7496 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
7497 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
7498 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
7499 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
7500 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
7501 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
7502 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
7503 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
7504 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
7505 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
7506 }
7507 
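/* Parse the variable-length byte array that follows the fixed
 * WMI_TAG_STATS_EVENT TLV: num_vdev_stats vdev records, then num_bcn_stats
 * beacon records, then num_pdev_stats pdev records, each consumed from
 * 'data' with a remaining-length check.
 */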
7508 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
7509 					      struct wmi_tlv_fw_stats_parse *parse,
7510 					      const void *ptr,
7511 					      u16 len)
7512 {
7513 	const struct wmi_stats_event *ev = parse->ev;
7514 	struct ath12k_fw_stats *stats = parse->stats;
7515 	struct ath12k *ar;
7516 	struct ath12k_link_vif *arvif;
7517 	struct ieee80211_sta *sta;
7518 	struct ath12k_sta *ahsta;
7519 	struct ath12k_link_sta *arsta;
7520 	int i, ret = 0;
7521 	const void *data = ptr;
7522 
7523 	if (!ev) {
7524 		ath12k_warn(ab, "failed to fetch update stats ev");
7525 		return -EPROTO;
7526 	}
7527 
7528 	if (!stats)
7529 		return -EINVAL;
7530 
7531 	rcu_read_lock();
7532 
7533 	stats->pdev_id = le32_to_cpu(ev->pdev_id);
7534 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
7535 	if (!ar) {
7536 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
7537 			    le32_to_cpu(ev->pdev_id));
7538 		ret = -EPROTO;
7539 		goto exit;
7540 	}
7541 
7542 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
7543 		const struct wmi_vdev_stats_params *src;
7544 		struct ath12k_fw_stats_vdev *dst;
7545 
7546 		src = data;
7547 		if (len < sizeof(*src)) {
7548 			ret = -EPROTO;
7549 			goto exit;
7550 		}
7551 
7552 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
7553 		if (arvif) {
7554 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7555 							   arvif->bssid,
7556 							   NULL);
7557 			if (sta) {
7558 				ahsta = ath12k_sta_to_ahsta(sta);
7559 				arsta = &ahsta->deflink;
7560 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
7561 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7562 					   "wmi stats vdev id %d snr %d\n",
7563 					   src->vdev_id, src->beacon_snr);
7564 			} else {
7565 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7566 					   "not found station bssid %pM for vdev stat\n",
7567 					   arvif->bssid);
7568 			}
7569 		}
7570 
7571 		data += sizeof(*src);
7572 		len -= sizeof(*src);
7573 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7574 		if (!dst)
7575 			continue;
7576 		ath12k_wmi_pull_vdev_stats(src, dst);
7577 		stats->stats_id = WMI_REQUEST_VDEV_STAT;
7578 		list_add_tail(&dst->list, &stats->vdevs);
7579 	}
7580 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
7581 		const struct ath12k_wmi_bcn_stats_params *src;
7582 		struct ath12k_fw_stats_bcn *dst;
7583 
7584 		src = data;
7585 		if (len < sizeof(*src)) {
7586 			ret = -EPROTO;
7587 			goto exit;
7588 		}
7589 
7590 		data += sizeof(*src);
7591 		len -= sizeof(*src);
7592 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7593 		if (!dst)
7594 			continue;
7595 		ath12k_wmi_pull_bcn_stats(src, dst);
7596 		stats->stats_id = WMI_REQUEST_BCN_STAT;
7597 		list_add_tail(&dst->list, &stats->bcn);
7598 	}
7599 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
7600 		const struct ath12k_wmi_pdev_stats_params *src;
7601 		struct ath12k_fw_stats_pdev *dst;
7602 
7603 		src = data;
7604 		if (len < sizeof(*src)) {
7605 			ret = -EPROTO;
7606 			goto exit;
7607 		}
7608 
7609 		stats->stats_id = WMI_REQUEST_PDEV_STAT;
7610 
7611 		data += sizeof(*src);
7612 		len -= sizeof(*src);
7613 
7614 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7615 		if (!dst)
7616 			continue;
7617 
7618 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
7619 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
7620 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
7621 		list_add_tail(&dst->list, &stats->pdevs);
7622 	}
7623 
7624 exit:
7625 	rcu_read_unlock();
7626 	return ret;
7627 }
7628 
7629 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
7630 					 u16 tag, u16 len,
7631 					 const void *ptr, void *data)
7632 {
7633 	struct wmi_tlv_fw_stats_parse *parse = data;
7634 	int ret = 0;
7635 
7636 	switch (tag) {
7637 	case WMI_TAG_STATS_EVENT:
7638 		parse->ev = ptr;
7639 		break;
7640 	case WMI_TAG_ARRAY_BYTE:
7641 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
7642 		break;
7643 	default:
7644 		break;
7645 	}
7646 	return ret;
7647 }
7648 
7649 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
7650 				    struct ath12k_fw_stats *stats)
7651 {
7652 	struct wmi_tlv_fw_stats_parse parse = {};
7653 
7654 	stats->stats_id = 0;
7655 	parse.stats = stats;
7656 
7657 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7658 				   ath12k_wmi_tlv_fw_stats_parse,
7659 				   &parse);
7660 }
7661 
7662 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
7663 {
7664 	struct ath12k_fw_stats stats = {};
7665 	struct ath12k *ar;
7666 	int ret;
7667 
7668 	INIT_LIST_HEAD(&stats.pdevs);
7669 	INIT_LIST_HEAD(&stats.vdevs);
7670 	INIT_LIST_HEAD(&stats.bcn);
7671 
7672 	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
7673 	if (ret) {
7674 		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
7675 		goto free;
7676 	}
7677 
7678 	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
7679 
7680 	rcu_read_lock();
7681 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
7682 	if (!ar) {
7683 		rcu_read_unlock();
7684 		ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
7685 			    stats.pdev_id);
7686 		goto free;
7687 	}
7688 
7689 	spin_lock_bh(&ar->data_lock);
7690 
7691 	/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
7692 	 * debugfs fw stats. Therefore, processing it separately.
7693 	 */
7694 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
7695 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
7696 		ar->fw_stats.fw_stats_done = true;
7697 		goto complete;
7698 	}
7699 
7700 	/* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only
7701 	 * via debugfs fw stats. Hence, processing these in debugfs context.
7702 	 */
7703 	ath12k_debugfs_fw_stats_process(ar, &stats);
7704 
7705 complete:
7706 	complete(&ar->fw_stats_complete);
7707 	spin_unlock_bh(&ar->data_lock);
7708 	rcu_read_unlock();
7709 
7710 	/* Since the stats' pdev, vdev and beacon lists are spliced and
7711 	 * reinitialised at this point, no need to free the individual lists.
7712 	 */
7713 	return;
7714 
7715 free:
7716 	ath12k_fw_stats_free(&stats);
7717 }
7718 
7719 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned
7720  * frequency is not part of the BDF CTL (Conformance Test Limits) table
7721  * entries.
7722  */
7722 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
7723 						 struct sk_buff *skb)
7724 {
7725 	const void **tb;
7726 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
7727 	int ret;
7728 
7729 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7730 	if (IS_ERR(tb)) {
7731 		ret = PTR_ERR(tb);
7732 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7733 		return;
7734 	}
7735 
7736 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
7737 	if (!ev) {
7738 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
7739 		kfree(tb);
7740 		return;
7741 	}
7742 
7743 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7744 		   "pdev ctl failsafe check ev status %d\n",
7745 		   ev->ctl_failsafe_status);
7746 
7747 	/* If ctl_failsafe_status is set to 1, FW will max out the transmit
7748 	 * power to 10 dBm; otherwise the CTL power entry in the BDF is used.
7749 	 */
7750 	if (ev->ctl_failsafe_status != 0)
7751 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
7752 			    ev->ctl_failsafe_status);
7753 
7754 	kfree(tb);
7755 }
7756 
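/* Keep the mac80211 CSA countdown in sync with the firmware's per-vdev
 * switch count and finalise the channel switch once the count reaches
 * zero.
 */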
7757 static void
7758 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
7759 					  const struct ath12k_wmi_pdev_csa_event *ev,
7760 					  const u32 *vdev_ids)
7761 {
7762 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
7763 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
7764 	struct ieee80211_bss_conf *conf;
7765 	struct ath12k_link_vif *arvif;
7766 	struct ath12k_vif *ahvif;
7767 	int i;
7768 
7769 	rcu_read_lock();
7770 	for (i = 0; i < num_vdevs; i++) {
7771 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
7772 
7773 		if (!arvif) {
7774 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
7775 				    vdev_ids[i]);
7776 			continue;
7777 		}
7778 		ahvif = arvif->ahvif;
7779 
7780 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
7781 			ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
7782 				    arvif->link_id);
7783 			continue;
7784 		}
7785 
7786 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
7787 		if (!conf) {
7788 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
7789 				    ahvif->vif->addr, arvif->link_id);
7790 			continue;
7791 		}
7792 
7793 		if (!arvif->is_up || !conf->csa_active)
7794 			continue;
7795 
7796 		/* Finish CSA when counter reaches zero */
7797 		if (!current_switch_count) {
7798 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
7799 			arvif->current_cntdown_counter = 0;
7800 		} else if (current_switch_count > 1) {
7801 			/* If the count in the event is not what we expect, don't
7802 			 * update the mac80211 count: on beacon Tx failure the count
7803 			 * in the firmware will not decrement, so this event can
7804 			 * carry the previous count value again.
7805 			 */
7806 			if (current_switch_count != arvif->current_cntdown_counter)
7807 				continue;
7808 
7809 			arvif->current_cntdown_counter =
7810 				ieee80211_beacon_update_cntdwn(ahvif->vif,
7811 							       arvif->link_id);
7812 		}
7813 	}
7814 	rcu_read_unlock();
7815 }
7816 
7817 static void
7818 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
7819 					      struct sk_buff *skb)
7820 {
7821 	const void **tb;
7822 	const struct ath12k_wmi_pdev_csa_event *ev;
7823 	const u32 *vdev_ids;
7824 	int ret;
7825 
7826 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7827 	if (IS_ERR(tb)) {
7828 		ret = PTR_ERR(tb);
7829 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7830 		return;
7831 	}
7832 
7833 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
7834 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
7835 
7836 	if (!ev || !vdev_ids) {
7837 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
7838 		kfree(tb);
7839 		return;
7840 	}
7841 
7842 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7843 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
7844 		   ev->current_switch_count, ev->pdev_id,
7845 		   ev->num_vdevs);
7846 
7847 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
7848 
7849 	kfree(tb);
7850 }
7851 
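/* A radar detection applies to the whole pdev, so any active channel
 * context on the radio is sufficient for notifying mac80211.
 */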
7852 static void
7853 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
7854 {
7855 	const void **tb;
7856 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
7857 	const struct ath12k_wmi_pdev_radar_event *ev;
7858 	struct ath12k *ar;
7859 	int ret;
7860 
7861 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7862 	if (IS_ERR(tb)) {
7863 		ret = PTR_ERR(tb);
7864 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7865 		return;
7866 	}
7867 
7868 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
7869 
7870 	if (!ev) {
7871 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
7872 		kfree(tb);
7873 		return;
7874 	}
7875 
7876 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7877 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
7878 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
7879 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
7880 		   ev->freq_offset, ev->sidx);
7881 
7882 	rcu_read_lock();
7883 
7884 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
7885 
7886 	if (!ar) {
7887 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
7888 			    ev->pdev_id);
7889 		goto exit;
7890 	}
7891 
7892 	arg.ar = ar;
7893 	arg.chanctx_conf = NULL;
7894 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
7895 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
7896 	if (!arg.chanctx_conf) {
7897 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
7898 		goto exit;
7899 	}
7900 
7901 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
7902 		   ev->pdev_id);
7903 
7904 	if (ar->dfs_block_radar_events)
7905 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
7906 	else
7907 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
7908 
7909 exit:
7910 	rcu_read_unlock();
7911 
7912 	kfree(tb);
7913 }
7914 
7915 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
7916 					  struct sk_buff *skb)
7917 {
7918 	const struct ath12k_wmi_ftm_event *ev;
7919 	const void **tb;
7920 	int ret;
7921 	u16 length;
7922 
7923 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7924 
7925 	if (IS_ERR(tb)) {
7926 		ret = PTR_ERR(tb);
7927 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
7928 		return;
7929 	}
7930 
7931 	ev = tb[WMI_TAG_ARRAY_BYTE];
7932 	if (!ev) {
7933 		ath12k_warn(ab, "failed to fetch ftm msg\n");
7934 		kfree(tb);
7935 		return;
7936 	}
7937 
7938 	length = skb->len - TLV_HDR_SIZE;
7939 	ath12k_tm_process_event(ab, cmd_id, ev, length);
7940 	kfree(tb);
7941 	tb = NULL;
7942 }
7943 
7944 static void
7945 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
7946 				  struct sk_buff *skb)
7947 {
7948 	struct ath12k *ar;
7949 	struct wmi_pdev_temperature_event ev = {0};
7950 
7951 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
7952 		ath12k_warn(ab, "failed to extract pdev temperature event");
7953 		return;
7954 	}
7955 
7956 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7957 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
7958 
7959 	rcu_read_lock();
7960 
7961 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
7962 	if (!ar) {
7963 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
7964 		goto exit;
7965 	}
7966 
7967 exit:
7968 	rcu_read_unlock();
7969 }
7970 
7971 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
7972 					struct sk_buff *skb)
7973 {
7974 	const void **tb;
7975 	const struct wmi_fils_discovery_event *ev;
7976 	int ret;
7977 
7978 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7979 	if (IS_ERR(tb)) {
7980 		ret = PTR_ERR(tb);
7981 		ath12k_warn(ab,
7982 			    "failed to parse FILS discovery event tlv %d\n",
7983 			    ret);
7984 		return;
7985 	}
7986 
7987 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
7988 	if (!ev) {
7989 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
7990 		kfree(tb);
7991 		return;
7992 	}
7993 
7994 	ath12k_warn(ab,
7995 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
7996 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
7997 
7998 	kfree(tb);
7999 }
8000 
8001 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
8002 					      struct sk_buff *skb)
8003 {
8004 	const void **tb;
8005 	const struct wmi_probe_resp_tx_status_event *ev;
8006 	int ret;
8007 
8008 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8009 	if (IS_ERR(tb)) {
8010 		ret = PTR_ERR(tb);
8011 		ath12k_warn(ab,
8012 			    "failed to parse probe response transmission status event tlv: %d\n",
8013 			    ret);
8014 		return;
8015 	}
8016 
8017 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
8018 	if (!ev) {
8019 		ath12k_warn(ab,
8020 			    "failed to fetch probe response transmission status event");
8021 		kfree(tb);
8022 		return;
8023 	}
8024 
8025 	if (ev->tx_status)
8026 		ath12k_warn(ab,
8027 			    "Probe response transmission failed for vdev_id %u, status %u\n",
8028 			    ev->vdev_id, ev->tx_status);
8029 
8030 	kfree(tb);
8031 }
8032 
8033 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
8034 				    struct sk_buff *skb)
8035 {
8036 	const void **tb;
8037 	const struct wmi_p2p_noa_event *ev;
8038 	const struct ath12k_wmi_p2p_noa_info *noa;
8039 	struct ath12k *ar;
8040 	int ret, vdev_id;
8041 
8042 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8043 	if (IS_ERR(tb)) {
8044 		ret = PTR_ERR(tb);
8045 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
8046 		return ret;
8047 	}
8048 
8049 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
8050 	noa = tb[WMI_TAG_P2P_NOA_INFO];
8051 
8052 	if (!ev || !noa) {
8053 		ret = -EPROTO;
8054 		goto out;
8055 	}
8056 
8057 	vdev_id = __le32_to_cpu(ev->vdev_id);
8058 
8059 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8060 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
8061 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
8062 
8063 	rcu_read_lock();
8064 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
8065 	if (!ar) {
8066 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
8067 			    vdev_id);
8068 		ret = -EINVAL;
8069 		goto unlock;
8070 	}
8071 
8072 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
8073 
8074 	ret = 0;
8075 
8076 unlock:
8077 	rcu_read_unlock();
8078 out:
8079 	kfree(tb);
8080 	return ret;
8081 }
8082 
8083 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
8084 					     struct sk_buff *skb)
8085 {
8086 	const struct wmi_rfkill_state_change_event *ev;
8087 	const void **tb;
8088 	int ret;
8089 
8090 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8091 	if (IS_ERR(tb)) {
8092 		ret = PTR_ERR(tb);
8093 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8094 		return;
8095 	}
8096 
8097 	ev = tb[WMI_TAG_RFKILL_EVENT];
8098 	if (!ev) {
8099 		kfree(tb);
8100 		return;
8101 	}
8102 
8103 	ath12k_dbg(ab, ATH12K_DBG_MAC,
8104 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
8105 		   le32_to_cpu(ev->gpio_pin_num),
8106 		   le32_to_cpu(ev->int_type),
8107 		   le32_to_cpu(ev->radio_state));
8108 
8109 	spin_lock_bh(&ab->base_lock);
8110 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
8111 	spin_unlock_bh(&ab->base_lock);
8112 
8113 	queue_work(ab->workqueue, &ab->rfkill_work);
8114 	kfree(tb);
8115 }
8116 
8117 static void
8118 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
8119 {
8120 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
8121 }
8122 
8123 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
8124 					struct sk_buff *skb)
8125 {
8126 	const void **tb;
8127 	const struct wmi_twt_enable_event *ev;
8128 	int ret;
8129 
8130 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8131 	if (IS_ERR(tb)) {
8132 		ret = PTR_ERR(tb);
8133 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
8134 			    ret);
8135 		return;
8136 	}
8137 
8138 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
8139 	if (!ev) {
8140 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
8141 		goto exit;
8142 	}
8143 
8144 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
8145 		   le32_to_cpu(ev->pdev_id),
8146 		   le32_to_cpu(ev->status));
8147 
8148 exit:
8149 	kfree(tb);
8150 }
8151 
8152 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
8153 					 struct sk_buff *skb)
8154 {
8155 	const void **tb;
8156 	const struct wmi_twt_disable_event *ev;
8157 	int ret;
8158 
8159 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8160 	if (IS_ERR(tb)) {
8161 		ret = PTR_ERR(tb);
8162 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
8163 			    ret);
8164 		return;
8165 	}
8166 
8167 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
8168 	if (!ev) {
8169 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
8170 		goto exit;
8171 	}
8172 
8173 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
8174 		   le32_to_cpu(ev->pdev_id),
8175 		   le32_to_cpu(ev->status));
8176 
8177 exit:
8178 	kfree(tb);
8179 }
8180 
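/* WOW wakeup TLVs: the fixed event-info TLV carries the wake reason; for
 * WOW_REASON_PAGE_FAULT it is followed by a byte-array TLV holding a
 * length-prefixed packet dump.
 */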
8181 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
8182 					    u16 tag, u16 len,
8183 					    const void *ptr, void *data)
8184 {
8185 	const struct wmi_wow_ev_pg_fault_param *pf_param;
8186 	const struct wmi_wow_ev_param *param;
8187 	struct wmi_wow_ev_arg *arg = data;
8188 	int pf_len;
8189 
8190 	switch (tag) {
8191 	case WMI_TAG_WOW_EVENT_INFO:
8192 		param = ptr;
8193 		arg->wake_reason = le32_to_cpu(param->wake_reason);
8194 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
8195 			   arg->wake_reason, wow_reason(arg->wake_reason));
8196 		break;
8197 
8198 	case WMI_TAG_ARRAY_BYTE:
8199 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
8200 			pf_param = ptr;
8201 			pf_len = le32_to_cpu(pf_param->len);
8202 			if (pf_len > len - sizeof(pf_len) ||
8203 			    pf_len < 0) {
8204 				ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
8205 					    pf_len);
8206 				return -EINVAL;
8207 			}
8208 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
8209 				   pf_len);
8210 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
8211 					"wow_reason_page_fault packet present",
8212 					"wow_pg_fault ",
8213 					pf_param->data,
8214 					pf_len);
8215 		}
8216 		break;
8217 	default:
8218 		break;
8219 	}
8220 
8221 	return 0;
8222 }
8223 
8224 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
8225 {
8226 	struct wmi_wow_ev_arg arg = { };
8227 	int ret;
8228 
8229 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8230 				  ath12k_wmi_wow_wakeup_host_parse,
8231 				  &arg);
8232 	if (ret) {
8233 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8234 			    ret);
8235 		return;
8236 	}
8237 
8238 	complete(&ab->wow.wakeup_completed);
8239 }
8240 
8241 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8242 						struct sk_buff *skb)
8243 {
8244 	const struct wmi_gtk_offload_status_event *ev;
8245 	struct ath12k_link_vif *arvif;
8246 	__be64 replay_ctr_be;
8247 	u64 replay_ctr;
8248 	const void **tb;
8249 	int ret;
8250 
8251 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8252 	if (IS_ERR(tb)) {
8253 		ret = PTR_ERR(tb);
8254 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8255 		return;
8256 	}
8257 
8258 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8259 	if (!ev) {
8260 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8261 		kfree(tb);
8262 		return;
8263 	}
8264 
8265 	rcu_read_lock();
8266 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8267 	if (!arvif) {
8268 		rcu_read_unlock();
8269 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8270 			    le32_to_cpu(ev->vdev_id));
8271 		kfree(tb);
8272 		return;
8273 	}
8274 
8275 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8276 	arvif->rekey_data.replay_ctr = replay_ctr;
8277 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8278 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8279 
8280 	/* supplicant expects big-endian replay counter */
8281 	replay_ctr_be = cpu_to_be64(replay_ctr);
8282 
8283 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8284 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8285 
8286 	rcu_read_unlock();
8287 
8288 	kfree(tb);
8289 }
8290 
8291 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8292 						struct sk_buff *skb)
8293 {
8294 	const struct wmi_mlo_setup_complete_event *ev;
8295 	struct ath12k *ar = NULL;
8296 	struct ath12k_pdev *pdev;
8297 	const void **tb;
8298 	int ret, i;
8299 
8300 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8301 	if (IS_ERR(tb)) {
8302 		ret = PTR_ERR(tb);
8303 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8304 			    ret);
8305 		return;
8306 	}
8307 
8308 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8309 	if (!ev) {
8310 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8311 		kfree(tb);
8312 		return;
8313 	}
8314 
8315 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8316 		goto skip_lookup;
8317 
8318 	for (i = 0; i < ab->num_radios; i++) {
8319 		pdev = &ab->pdevs[i];
8320 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8321 			ar = pdev->ar;
8322 			break;
8323 		}
8324 	}
8325 
8326 skip_lookup:
8327 	if (!ar) {
8328 		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
8329 			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
8330 		goto out;
8331 	}
8332 
8333 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8334 	complete(&ar->mlo_setup_done);
8335 
8336 out:
8337 	kfree(tb);
8338 }
8339 
8340 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8341 					       struct sk_buff *skb)
8342 {
8343 	const struct wmi_mlo_teardown_complete_event *ev;
8344 	const void **tb;
8345 	int ret;
8346 
8347 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8348 	if (IS_ERR(tb)) {
8349 		ret = PTR_ERR(tb);
8350 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8351 		return;
8352 	}
8353 
8354 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8355 	if (!ev) {
8356 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8357 		kfree(tb);
8358 		return;
8359 	}
8360 
8361 	kfree(tb);
8362 }
8363 
8364 #ifdef CONFIG_ATH12K_DEBUGFS
8365 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8366 					    const void *ptr, u16 tag, u16 len,
8367 					    struct wmi_tpc_stats_arg *tpc_stats)
8368 {
8369 	u32 len1, len2, len3, len4;
8370 	s16 *dst_ptr;
8371 	s8 *dst_ptr_ctl;
8372 
8373 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8374 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8375 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8376 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8377 
8378 	switch (tpc_stats->event_count) {
8379 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8380 		if (len1 > len)
8381 			return -ENOBUFS;
8382 
8383 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8384 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8385 			memcpy(dst_ptr, ptr, len1);
8386 		}
8387 		break;
8388 	case ATH12K_TPC_STATS_RATES_EVENT1:
8389 		if (len2 > len)
8390 			return -ENOBUFS;
8391 
8392 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8393 			dst_ptr = tpc_stats->rates_array1.rate_array;
8394 			memcpy(dst_ptr, ptr, len2);
8395 		}
8396 		break;
8397 	case ATH12K_TPC_STATS_RATES_EVENT2:
8398 		if (len3 > len)
8399 			return -ENOBUFS;
8400 
8401 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
8402 			dst_ptr = tpc_stats->rates_array2.rate_array;
8403 			memcpy(dst_ptr, ptr, len3);
8404 		}
8405 		break;
8406 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
8407 		if (len4 > len)
8408 			return -ENOBUFS;
8409 
8410 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
8411 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
8412 			memcpy(dst_ptr_ctl, ptr, len4);
8413 		}
8414 		break;
8415 	}
8416 	return 0;
8417 }
8418 
8419 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
8420 				  struct wmi_tpc_stats_arg *tpc_stats,
8421 				  struct wmi_max_reg_power_fixed_params *ev)
8422 {
8423 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
8424 	u32 total_size;
8425 
8426 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8427 		   "Received reg power array type %d length %d for tpc stats\n",
8428 		   le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
8429 
8430 	switch (le32_to_cpu(ev->reg_power_type)) {
8431 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
8432 		reg_pwr = &tpc_stats->max_reg_allowed_power;
8433 		break;
8434 	default:
8435 		return -EINVAL;
8436 	}
8437 
8438 	/* Each entry is 2 bytes wide, hence the product of the four
	 * dimensions is multiplied by 2.
	 */
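	/* Illustrative example: d1 = 1, d2 = 2, d3 = 10 and d4 = 6 would
	 * require a reg_array_len of 1 * 2 * 10 * 6 * 2 = 240 bytes.
	 */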
8439 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8440 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
8441 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
8442 		ath12k_warn(ab,
8443 			    "Total size and reg_array_len don't match for tpc stats\n");
8444 		return -EINVAL;
8445 	}
8446 
8447 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
8448 
8449 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
8450 					 GFP_ATOMIC);
8451 	if (!reg_pwr->reg_pwr_array)
8452 		return -ENOMEM;
8453 
8454 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
8455 
8456 	return 0;
8457 }
8458 
8459 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
8460 				     struct wmi_tpc_stats_arg *tpc_stats,
8461 				     struct wmi_tpc_rates_array_fixed_params *ev)
8462 {
8463 	struct wmi_tpc_rates_array_arg *rates_array;
8464 	u32 flag = 0, rate_array_len;
8465 
8466 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8467 		   "Received rates array type %d length %d for tpc stats\n",
8468 		   le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
8469 
8470 	switch (le32_to_cpu(ev->rate_array_type)) {
8471 	case ATH12K_TPC_STATS_RATES_ARRAY1:
8472 		rates_array = &tpc_stats->rates_array1;
8473 		flag = WMI_TPC_RATES_ARRAY1;
8474 		break;
8475 	case ATH12K_TPC_STATS_RATES_ARRAY2:
8476 		rates_array = &tpc_stats->rates_array2;
8477 		flag = WMI_TPC_RATES_ARRAY2;
8478 		break;
8479 	default:
8480 		ath12k_warn(ab,
8481 			    "Received invalid type of rates array for tpc stats\n");
8482 		return -EINVAL;
8483 	}
8484 	memcpy(&rates_array->tpc_rates_array, ev,
8485 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
8486 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
8487 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
8488 	if (!rates_array->rate_array)
8489 		return -ENOMEM;
8490 
8491 	tpc_stats->tlvs_rcvd |= flag;
8492 	return 0;
8493 }
8494 
8495 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
8496 				      struct wmi_tpc_stats_arg *tpc_stats,
8497 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
8498 {
8499 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
8500 	u32 total_size, ctl_array_len, flag = 0;
8501 
8502 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8503 		   "Received ctl array type %d length %d for tpc stats\n",
8504 		   le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
8505 
8506 	switch (le32_to_cpu(ev->ctl_array_type)) {
8507 	case ATH12K_TPC_STATS_CTL_ARRAY:
8508 		ctl_array = &tpc_stats->ctl_array;
8509 		flag = WMI_TPC_CTL_PWR_ARRAY;
8510 		break;
8511 	default:
8512 		ath12k_warn(ab,
8513 			    "Received invalid type of ctl pwr table for tpc stats\n");
8514 		return -EINVAL;
8515 	}
8516 
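	/* CTL table entries are one byte (s8) wide, so unlike the reg power
	 * array no width multiplier is applied to the dimension product.
	 */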
8517 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8518 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
8519 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
8520 		ath12k_warn(ab,
8521 			    "Total size and ctl_array_len don't match for tpc stats\n");
8522 		return -EINVAL;
8523 	}
8524 
8525 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
8526 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
8527 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
8528 	if (!ctl_array->ctl_pwr_table)
8529 		return -ENOMEM;
8530 
8531 	tpc_stats->tlvs_rcvd |= flag;
8532 	return 0;
8533 }
8534 
8535 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
8536 					      u16 tag, u16 len,
8537 					      const void *ptr, void *data)
8538 {
8539 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
8540 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
8541 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
8542 	struct wmi_tpc_stats_arg *tpc_stats = data;
8543 	struct wmi_tpc_config_params *tpc_config;
8544 	int ret = 0;
8545 
8546 	if (!tpc_stats) {
8547 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8548 		return -EINVAL;
8549 	}
8550 
8551 	switch (tag) {
8552 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
8553 		tpc_config = (struct wmi_tpc_config_params *)ptr;
8554 		memcpy(&tpc_stats->tpc_config, tpc_config,
8555 		       sizeof(struct wmi_tpc_config_params));
8556 		break;
8557 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
8558 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
8559 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
8560 		break;
8561 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
8562 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
8563 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
8564 		break;
8565 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
8566 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
8567 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
8568 		break;
8569 	default:
8570 		ath12k_warn(ab,
8571 			    "Received invalid tag for tpc stats in subtlvs\n");
8572 		return -EINVAL;
8573 	}
8574 	return ret;
8575 }
8576 
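/*
 * A TPC stats event is a fixed-param TLV followed by optional arrays:
 * WMI_TAG_ARRAY_STRUCT sub-TLVs describe and allocate the buffers (see
 * ath12k_wmi_tpc_stats_subtlv_parser() above), while WMI_TAG_ARRAY_INT16
 * and WMI_TAG_ARRAY_BYTE carry the payloads that
 * ath12k_wmi_tpc_stats_copy_buffer() copies into those allocations.
 */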
8577 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
8578 					     u16 tag, u16 len,
8579 					     const void *ptr, void *data)
8580 {
8581 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
8582 	int ret;
8583 
8584 	switch (tag) {
8585 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
8586 		ret = 0;
8587 		/* Fixed param is already processed */
8588 		break;
8589 	case WMI_TAG_ARRAY_STRUCT:
8590 		/* len 0 is expected for array of struct when there
8591 		 * is no content of that type to pack inside that tlv
8592 		 */
8593 		if (len == 0)
8594 			return 0;
8595 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
8596 					  ath12k_wmi_tpc_stats_subtlv_parser,
8597 					  tpc_stats);
8598 		break;
8599 	case WMI_TAG_ARRAY_INT16:
8600 		if (len == 0)
8601 			return 0;
8602 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8603 						       WMI_TAG_ARRAY_INT16,
8604 						       len, tpc_stats);
8605 		break;
8606 	case WMI_TAG_ARRAY_BYTE:
8607 		if (len == 0)
8608 			return 0;
8609 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8610 						       WMI_TAG_ARRAY_BYTE,
8611 						       len, tpc_stats);
8612 		break;
8613 	default:
8614 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
8615 		ret = -EINVAL;
8616 		break;
8617 	}
8618 	return ret;
8619 }
8620 
8621 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
8622 {
8623 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
8624 
8625 	lockdep_assert_held(&ar->data_lock);
8626 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
8627 	if (tpc_stats) {
8628 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
8629 		kfree(tpc_stats->rates_array1.rate_array);
8630 		kfree(tpc_stats->rates_array2.rate_array);
8631 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
8632 		kfree(tpc_stats);
8633 		ar->debug.tpc_stats = NULL;
8634 	}
8635 }
8636 
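/*
 * TPC stats arrive as a numbered sequence of events answering a single
 * debugfs request: event_count 0 allocates ar->debug.tpc_stats, every
 * follow-up event must carry the previous event_count plus one, and
 * end_of_event marks the final fragment, completing
 * ar->debug.tpc_complete for the waiting reader.
 */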
8637 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8638 					 struct sk_buff *skb)
8639 {
8640 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
8641 	struct wmi_tpc_stats_arg *tpc_stats;
8642 	const struct wmi_tlv *tlv;
8643 	void *ptr = skb->data;
8644 	struct ath12k *ar;
8645 	u16 tlv_tag;
8646 	u32 event_count;
8647 	int ret;
8648 
8649 	if (!skb->data) {
8650 		ath12k_warn(ab, "No data present in tpc stats event\n");
8651 		return;
8652 	}
8653 
8654 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
8655 		ath12k_warn(ab, "TPC stats event size invalid\n");
8656 		return;
8657 	}
8658 
8659 	tlv = (struct wmi_tlv *)ptr;
8660 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
8661 	ptr += sizeof(*tlv);
8662 
8663 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
8664 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
8665 		return;
8666 	}
8667 
8668 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
8669 	rcu_read_lock();
8670 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
8671 	if (!ar) {
8672 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
8673 		rcu_read_unlock();
8674 		return;
8675 	}
8676 	spin_lock_bh(&ar->data_lock);
8677 	if (!ar->debug.tpc_request) {
8678 		/* Event received either without a request or after the
8679 		 * timeout; free the memory if it is already allocated
8680 		 */
8681 		if (ar->debug.tpc_stats) {
8682 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
8683 			ath12k_wmi_free_tpc_stats_mem(ar);
8684 		}
8685 		goto unlock;
8686 	}
8687 
8688 	event_count = le32_to_cpu(fixed_param->event_count);
8689 	if (event_count == 0) {
8690 		if (ar->debug.tpc_stats) {
8691 			ath12k_warn(ab,
8692 				    "Stale tpc stats memory present\n");
8693 			goto unlock;
8694 		}
8695 		ar->debug.tpc_stats =
8696 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
8697 				GFP_ATOMIC);
8698 		if (!ar->debug.tpc_stats) {
8699 			ath12k_warn(ab,
8700 				    "Failed to allocate memory for tpc stats\n");
8701 			goto unlock;
8702 		}
8703 	}
8704 
8705 	tpc_stats = ar->debug.tpc_stats;
8706 	if (!tpc_stats) {
8707 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8708 		goto unlock;
8709 	}
8710 
8711 	if (event_count != 0) {
8712 		if (event_count != tpc_stats->event_count + 1) {
8713 			ath12k_warn(ab,
8714 				    "Invalid tpc event received\n");
8715 			goto unlock;
8716 		}
8717 	}
8718 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
8719 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
8720 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
8721 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8722 		   "tpc stats event_count %d\n",
8723 		   tpc_stats->event_count);
8724 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8725 				  ath12k_wmi_tpc_stats_event_parser,
8726 				  tpc_stats);
8727 	if (ret) {
8728 		ath12k_wmi_free_tpc_stats_mem(ar);
8729 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
8730 		goto unlock;
8731 	}
8732 
8733 	if (tpc_stats->end_of_event)
8734 		complete(&ar->debug.tpc_complete);
8735 
8736 unlock:
8737 	spin_unlock_bh(&ar->data_lock);
8738 	rcu_read_unlock();
8739 }
8740 #else
8741 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8742 					 struct sk_buff *skb)
8743 {
8744 }
8745 #endif
8746 
8747 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
8748 {
8749 	struct wmi_cmd_hdr *cmd_hdr;
8750 	enum wmi_tlv_event_id id;
8751 
8752 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
8753 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
8754 
8755 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
8756 		goto out;
8757 
8758 	switch (id) {
8759 		/* Process all the WMI events here */
8760 	case WMI_SERVICE_READY_EVENTID:
8761 		ath12k_service_ready_event(ab, skb);
8762 		break;
8763 	case WMI_SERVICE_READY_EXT_EVENTID:
8764 		ath12k_service_ready_ext_event(ab, skb);
8765 		break;
8766 	case WMI_SERVICE_READY_EXT2_EVENTID:
8767 		ath12k_service_ready_ext2_event(ab, skb);
8768 		break;
8769 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
8770 		ath12k_reg_chan_list_event(ab, skb);
8771 		break;
8772 	case WMI_READY_EVENTID:
8773 		ath12k_ready_event(ab, skb);
8774 		break;
8775 	case WMI_PEER_DELETE_RESP_EVENTID:
8776 		ath12k_peer_delete_resp_event(ab, skb);
8777 		break;
8778 	case WMI_VDEV_START_RESP_EVENTID:
8779 		ath12k_vdev_start_resp_event(ab, skb);
8780 		break;
8781 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
8782 		ath12k_bcn_tx_status_event(ab, skb);
8783 		break;
8784 	case WMI_VDEV_STOPPED_EVENTID:
8785 		ath12k_vdev_stopped_event(ab, skb);
8786 		break;
8787 	case WMI_MGMT_RX_EVENTID:
8788 		ath12k_mgmt_rx_event(ab, skb);
8789 		/* mgmt_rx_event() owns the skb now! */
8790 		return;
8791 	case WMI_MGMT_TX_COMPLETION_EVENTID:
8792 		ath12k_mgmt_tx_compl_event(ab, skb);
8793 		break;
8794 	case WMI_SCAN_EVENTID:
8795 		ath12k_scan_event(ab, skb);
8796 		break;
8797 	case WMI_PEER_STA_KICKOUT_EVENTID:
8798 		ath12k_peer_sta_kickout_event(ab, skb);
8799 		break;
8800 	case WMI_ROAM_EVENTID:
8801 		ath12k_roam_event(ab, skb);
8802 		break;
8803 	case WMI_CHAN_INFO_EVENTID:
8804 		ath12k_chan_info_event(ab, skb);
8805 		break;
8806 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
8807 		ath12k_pdev_bss_chan_info_event(ab, skb);
8808 		break;
8809 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
8810 		ath12k_vdev_install_key_compl_event(ab, skb);
8811 		break;
8812 	case WMI_SERVICE_AVAILABLE_EVENTID:
8813 		ath12k_service_available_event(ab, skb);
8814 		break;
8815 	case WMI_PEER_ASSOC_CONF_EVENTID:
8816 		ath12k_peer_assoc_conf_event(ab, skb);
8817 		break;
8818 	case WMI_UPDATE_STATS_EVENTID:
8819 		ath12k_update_stats_event(ab, skb);
8820 		break;
8821 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
8822 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
8823 		break;
8824 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
8825 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
8826 		break;
8827 	case WMI_PDEV_TEMPERATURE_EVENTID:
8828 		ath12k_wmi_pdev_temperature_event(ab, skb);
8829 		break;
8830 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
8831 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
8832 		break;
8833 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
8834 		ath12k_fils_discovery_event(ab, skb);
8835 		break;
8836 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
8837 		ath12k_probe_resp_tx_status_event(ab, skb);
8838 		break;
8839 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
8840 		ath12k_rfkill_state_change_event(ab, skb);
8841 		break;
8842 	case WMI_TWT_ENABLE_EVENTID:
8843 		ath12k_wmi_twt_enable_event(ab, skb);
8844 		break;
8845 	case WMI_TWT_DISABLE_EVENTID:
8846 		ath12k_wmi_twt_disable_event(ab, skb);
8847 		break;
8848 	case WMI_P2P_NOA_EVENTID:
8849 		ath12k_wmi_p2p_noa_event(ab, skb);
8850 		break;
8851 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
8852 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
8853 		break;
8854 	case WMI_VDEV_DELETE_RESP_EVENTID:
8855 		ath12k_vdev_delete_resp_event(ab, skb);
8856 		break;
8857 	case WMI_DIAG_EVENTID:
8858 		ath12k_wmi_diag_event(ab, skb);
8859 		break;
8860 	case WMI_WOW_WAKEUP_HOST_EVENTID:
8861 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
8862 		break;
8863 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
8864 		ath12k_wmi_gtk_offload_status_event(ab, skb);
8865 		break;
8866 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
8867 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
8868 		break;
8869 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
8870 		ath12k_wmi_event_teardown_complete(ab, skb);
8871 		break;
8872 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
8873 		ath12k_wmi_process_tpc_stats(ab, skb);
8874 		break;
8875 	case WMI_11D_NEW_COUNTRY_EVENTID:
8876 		ath12k_reg_11d_new_cc_event(ab, skb);
8877 		break;
8878 	/* add Unsupported events (rare) here */
8879 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
8880 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
8881 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
8882 		ath12k_dbg(ab, ATH12K_DBG_WMI,
8883 			   "ignoring unsupported event 0x%x\n", id);
8884 		break;
8885 	/* add Unsupported events (frequent) here */
8886 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
8887 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
8888 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
8889 		/* debug might flood hence silently ignore (no-op) */
8890 		/* debug output would flood, hence silently ignore (no-op) */
8891 	case WMI_PDEV_UTF_EVENTID:
8892 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
8893 			ath12k_tm_wmi_event_segmented(ab, id, skb);
8894 		else
8895 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
8896 		break;
8897 	default:
8898 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
8899 		break;
8900 	}
8901 
8902 out:
8903 	dev_kfree_skb(skb);
8904 }
8905 
8906 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
8907 					   u32 pdev_idx)
8908 {
8909 	int status;
8910 	static const u32 svc_id[] = {
8911 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
8912 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
8913 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
8914 	};
8915 	struct ath12k_htc_svc_conn_req conn_req = {};
8916 	struct ath12k_htc_svc_conn_resp conn_resp = {};
8917 
8918 	/* these fields are the same for all service endpoints */
8919 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
8920 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
8921 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
8922 
8923 	/* connect to control service */
8924 	conn_req.service_id = svc_id[pdev_idx];
8925 
8926 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
8927 	if (status) {
8928 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
8929 			    status);
8930 		return status;
8931 	}
8932 
8933 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
8934 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
8935 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
8936 
8937 	return 0;
8938 }
8939 
8940 static int
8941 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
8942 			      struct wmi_unit_test_cmd ut_cmd,
8943 			      u32 *test_args)
8944 {
8945 	struct ath12k_wmi_pdev *wmi = ar->wmi;
8946 	struct wmi_unit_test_cmd *cmd;
8947 	struct sk_buff *skb;
8948 	struct wmi_tlv *tlv;
8949 	void *ptr;
8950 	u32 *ut_cmd_args;
8951 	int buf_len, arg_len;
8952 	int ret;
8953 	int i;
8954 
8955 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
8956 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
8957 
8958 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
8959 	if (!skb)
8960 		return -ENOMEM;
8961 
8962 	cmd = (struct wmi_unit_test_cmd *)skb->data;
8963 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
8964 						 sizeof(ut_cmd));
8965 
8966 	cmd->vdev_id = ut_cmd.vdev_id;
8967 	cmd->module_id = ut_cmd.module_id;
8968 	cmd->num_args = ut_cmd.num_args;
8969 	cmd->diag_token = ut_cmd.diag_token;
8970 
8971 	ptr = skb->data + sizeof(ut_cmd);
8972 
8973 	tlv = ptr;
8974 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
8975 
8976 	ptr += TLV_HDR_SIZE;
8977 
8978 	ut_cmd_args = ptr;
8979 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
8980 		ut_cmd_args[i] = test_args[i];
8981 
8982 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8983 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
8984 		   le32_to_cpu(cmd->module_id), le32_to_cpu(cmd->vdev_id),
8985 		   le32_to_cpu(cmd->num_args), le32_to_cpu(cmd->diag_token));
8986 
8987 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
8988 
8989 	if (ret) {
8990 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
8991 			    ret);
8992 		dev_kfree_skb(skb);
8993 	}
8994 
8995 	return ret;
8996 }
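
/*
 * On the wire the unit test command built above is:
 *   WMI_TAG_UNIT_TEST_CMD fixed params,
 *   a WMI_TAG_ARRAY_UINT32 TLV header,
 *   num_args u32 argument words.
 * ath12k_wmi_simulate_radar() below is one caller of this helper.
 */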
8997 
8998 int ath12k_wmi_simulate_radar(struct ath12k *ar)
8999 {
9000 	struct ath12k_link_vif *arvif;
9001 	u32 dfs_args[DFS_MAX_TEST_ARGS];
9002 	struct wmi_unit_test_cmd wmi_ut;
9003 	bool arvif_found = false;
9004 
9005 	list_for_each_entry(arvif, &ar->arvifs, list) {
9006 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
9007 			arvif_found = true;
9008 			break;
9009 		}
9010 	}
9011 
9012 	if (!arvif_found)
9013 		return -EINVAL;
9014 
9015 	dfs_args[DFS_TEST_CMDID] = 0;
9016 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
9017 	/* Currently we could pass segment_id(b0 - b1), chirp(b2)
9018 	 * freq offset (b3 - b10) to unit test. For simulation
9019 	 * purpose this can be set to 0 which is valid.
9020 	 */
9021 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
9022 
9023 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
9024 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
9025 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
9026 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
9027 
9028 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
9029 
9030 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
9031 }
9032 
9033 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
9034 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
9035 {
9036 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
9037 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9038 	struct sk_buff *skb;
9039 	struct wmi_tlv *tlv;
9040 	__le32 *pdev_id;
9041 	u32 buf_len;
9042 	void *ptr;
9043 	int ret;
9044 
9045 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
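	/* buf_len maps onto the frame built below: the fixed params, one
	 * TLV header plus a single u32 pdev_id, and two empty TLV headers
	 * for the unused vdev id and peer MAC arrays.
	 */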
9046 
9047 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9048 	if (!skb)
9049 		return -ENOMEM;
9050 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
9051 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
9052 						 sizeof(*cmd));
9053 
9054 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
9055 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
9056 	cmd->subid = cpu_to_le32(tpc_stats_type);
9057 
9058 	ptr = skb->data + sizeof(*cmd);
9059 
9060 	/* The TLV arrays below optionally follow this fixed param TLV structure:
9061 	 * 1. ARRAY_UINT32 pdev_ids[]
9062 	 *      If this array is present and non-zero length, stats should only
9063 	 *      be provided from the pdevs identified in the array.
9064 	 * 2. ARRAY_UINT32 vdev_ids[]
9065 	 *      If this array is present and non-zero length, stats should only
9066 	 *      be provided from the vdevs identified in the array.
9067 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
9068 	 *      If this array is present and non-zero length, stats should only
9069 	 *      be provided from the peers with the MAC addresses specified
9070 	 *      in the array
9071 	 */
9072 	tlv = ptr;
9073 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9074 	ptr += TLV_HDR_SIZE;
9075 
9076 	pdev_id = ptr;
9077 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
9078 	ptr += sizeof(*pdev_id);
9079 
9080 	tlv = ptr;
9081 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9082 	ptr += TLV_HDR_SIZE;
9083 
9084 	tlv = ptr;
9085 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
9086 	ptr += TLV_HDR_SIZE;
9087 
9088 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
9089 	if (ret) {
9090 		ath12k_warn(ar->ab,
9091 			    "failed to submit WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
9092 		dev_kfree_skb(skb);
9093 		return ret;
9094 	}
9095 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
9096 		   ar->pdev->pdev_id);
9097 
9098 	return ret;
9099 }
9100 
9101 int ath12k_wmi_connect(struct ath12k_base *ab)
9102 {
9103 	u32 i;
9104 	u8 wmi_ep_count;
	int ret;
9105 
9106 	wmi_ep_count = ab->htc.wmi_ep_count;
9107 	if (wmi_ep_count > ab->hw_params->max_radios)
9108 		return -EINVAL;
9109 
9110 	for (i = 0; i < wmi_ep_count; i++) {
9111 		ret = ath12k_connect_pdev_htc_service(ab, i);
		if (ret)
			return ret;
	}
9112 
9113 	return 0;
9114 }
9115 
9116 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
9117 {
9118 	if (WARN_ON(pdev_id >= MAX_RADIOS))
9119 		return;
9120 
9121 	/* TODO: Deinit any pdev specific wmi resource */
9122 }
9123 
9124 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
9125 			   u8 pdev_id)
9126 {
9127 	struct ath12k_wmi_pdev *wmi_handle;
9128 
9129 	if (pdev_id >= ab->hw_params->max_radios)
9130 		return -EINVAL;
9131 
9132 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
9133 
9134 	wmi_handle->wmi_ab = &ab->wmi_ab;
9135 
9136 	ab->wmi_ab.ab = ab;
9137 	/* TODO: Init remaining resource specific to pdev */
9138 
9139 	return 0;
9140 }
9141 
9142 int ath12k_wmi_attach(struct ath12k_base *ab)
9143 {
9144 	int ret;
9145 
9146 	ret = ath12k_wmi_pdev_attach(ab, 0);
9147 	if (ret)
9148 		return ret;
9149 
9150 	ab->wmi_ab.ab = ab;
9151 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9152 
9153 	/* It's overwritten when service_ext_ready is handled */
9154 	if (ab->hw_params->single_pdev_only)
9155 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9156 
9157 	/* TODO: Init remaining wmi soc resources required */
9158 	init_completion(&ab->wmi_ab.service_ready);
9159 	init_completion(&ab->wmi_ab.unified_ready);
9160 
9161 	return 0;
9162 }
9163 
9164 void ath12k_wmi_detach(struct ath12k_base *ab)
9165 {
9166 	int i;
9167 
9168 	/* TODO: Deinit wmi resource specific to SOC as required */
9169 
9170 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
9171 		ath12k_wmi_pdev_detach(ab, i);
9172 
9173 	ath12k_wmi_free_dbring_caps(ab);
9174 }
9175 
9176 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
9177 {
9178 	struct wmi_hw_data_filter_cmd *cmd;
9179 	struct sk_buff *skb;
9180 	int len;
9181 
9182 	len = sizeof(*cmd);
9183 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9184 
9185 	if (!skb)
9186 		return -ENOMEM;
9187 
9188 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
9189 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
9190 						 sizeof(*cmd));
9191 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9192 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
9193 
9194 	/* Set all modes in case of disable */
9195 	if (arg->enable)
9196 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
9197 	else
9198 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
9199 
9200 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9201 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
9202 		   arg->enable, arg->hw_filter_bitmap);
9203 
9204 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
9205 }
9206 
9207 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
9208 {
9209 	struct wmi_wow_host_wakeup_cmd *cmd;
9210 	struct sk_buff *skb;
9211 	size_t len;
9212 
9213 	len = sizeof(*cmd);
9214 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9215 	if (!skb)
9216 		return -ENOMEM;
9217 
9218 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
9219 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
9220 						 sizeof(*cmd));
9221 
9222 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
9223 
9224 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
9225 }
9226 
9227 int ath12k_wmi_wow_enable(struct ath12k *ar)
9228 {
9229 	struct wmi_wow_enable_cmd *cmd;
9230 	struct sk_buff *skb;
9231 	int len;
9232 
9233 	len = sizeof(*cmd);
9234 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9235 	if (!skb)
9236 		return -ENOMEM;
9237 
9238 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
9239 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
9240 						 sizeof(*cmd));
9241 
9242 	cmd->enable = cpu_to_le32(1);
9243 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
9244 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
9245 
9246 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
9247 }
9248 
9249 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
9250 				    enum wmi_wow_wakeup_event event,
9251 				    u32 enable)
9252 {
9253 	struct wmi_wow_add_del_event_cmd *cmd;
9254 	struct sk_buff *skb;
9255 	size_t len;
9256 
9257 	len = sizeof(*cmd);
9258 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9259 	if (!skb)
9260 		return -ENOMEM;
9261 
9262 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
9263 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
9264 						 sizeof(*cmd));
9265 	cmd->vdev_id = cpu_to_le32(vdev_id);
9266 	cmd->is_add = cpu_to_le32(enable);
9267 	cmd->event_bitmap = cpu_to_le32((1 << event));
9268 
9269 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
9270 		   wow_wakeup_event(event), enable, vdev_id);
9271 
9272 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
9273 }
9274 
9275 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
9276 			       const u8 *pattern, const u8 *mask,
9277 			       int pattern_len, int pattern_offset)
9278 {
9279 	struct wmi_wow_add_pattern_cmd *cmd;
9280 	struct wmi_wow_bitmap_pattern_params *bitmap;
9281 	struct wmi_tlv *tlv;
9282 	struct sk_buff *skb;
9283 	void *ptr;
9284 	size_t len;
9285 
9286 	len = sizeof(*cmd) +
9287 	      sizeof(*tlv) +			/* array struct */
9288 	      sizeof(*bitmap) +			/* bitmap */
9289 	      sizeof(*tlv) +			/* empty ipv4 sync */
9290 	      sizeof(*tlv) +			/* empty ipv6 sync */
9291 	      sizeof(*tlv) +			/* empty magic */
9292 	      sizeof(*tlv) +			/* empty info timeout */
9293 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
9294 
9295 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9296 	if (!skb)
9297 		return -ENOMEM;
9298 
9299 	/* cmd */
9300 	ptr = skb->data;
9301 	cmd = ptr;
9302 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
9303 						 sizeof(*cmd));
9304 	cmd->vdev_id = cpu_to_le32(vdev_id);
9305 	cmd->pattern_id = cpu_to_le32(pattern_id);
9306 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9307 
9308 	ptr += sizeof(*cmd);
9309 
9310 	/* bitmap */
9311 	tlv = ptr;
9312 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
9313 
9314 	ptr += sizeof(*tlv);
9315 
9316 	bitmap = ptr;
9317 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
9318 						    sizeof(*bitmap));
9319 	memcpy(bitmap->patternbuf, pattern, pattern_len);
9320 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
9321 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
9322 	bitmap->pattern_len = cpu_to_le32(pattern_len);
9323 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
9324 	bitmap->pattern_id = cpu_to_le32(pattern_id);
9325 
9326 	ptr += sizeof(*bitmap);
9327 
9328 	/* ipv4 sync */
9329 	tlv = ptr;
9330 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9331 
9332 	ptr += sizeof(*tlv);
9333 
9334 	/* ipv6 sync */
9335 	tlv = ptr;
9336 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9337 
9338 	ptr += sizeof(*tlv);
9339 
9340 	/* magic */
9341 	tlv = ptr;
9342 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9343 
9344 	ptr += sizeof(*tlv);
9345 
9346 	/* pattern info timeout */
9347 	tlv = ptr;
9348 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9349 
9350 	ptr += sizeof(*tlv);
9351 
9352 	/* ratelimit interval */
9353 	tlv = ptr;
9354 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9355 
9356 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
9357 		   vdev_id, pattern_id, pattern_offset, pattern_len);
9358 
9359 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
9360 			bitmap->patternbuf, pattern_len);
9361 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
9362 			bitmap->bitmaskbuf, pattern_len);
9363 
9364 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
9365 }
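
/*
 * Illustrative sketch only (compiled out): one way a caller might arm a
 * single-byte bitmap pattern with the helper above. The pattern id, the
 * matched byte and the offset are assumptions for the example, not
 * driver policy.
 */
#if 0
static int ath12k_example_arm_wow_pattern(struct ath12k *ar, u32 vdev_id)
{
	static const u8 pattern[] = { 0x11 };	/* e.g. IPPROTO_UDP */
	static const u8 mask[] = { 0xff };	/* match every bit of the byte */

	return ath12k_wmi_wow_add_pattern(ar, vdev_id, 0, pattern, mask,
					  sizeof(pattern), 23);
}
#endif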
9366 
9367 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
9368 {
9369 	struct wmi_wow_del_pattern_cmd *cmd;
9370 	struct sk_buff *skb;
9371 	size_t len;
9372 
9373 	len = sizeof(*cmd);
9374 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9375 	if (!skb)
9376 		return -ENOMEM;
9377 
9378 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
9379 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
9380 						 sizeof(*cmd));
9381 	cmd->vdev_id = cpu_to_le32(vdev_id);
9382 	cmd->pattern_id = cpu_to_le32(pattern_id);
9383 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9384 
9385 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
9386 		   vdev_id, pattern_id);
9387 
9388 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
9389 }
9390 
9391 static struct sk_buff *
9392 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
9393 				   struct wmi_pno_scan_req_arg *pno)
9394 {
9395 	struct nlo_configured_params *nlo_list;
9396 	size_t len, nlo_list_len, channel_list_len;
9397 	struct wmi_wow_nlo_config_cmd *cmd;
9398 	__le32 *channel_list;
9399 	struct wmi_tlv *tlv;
9400 	struct sk_buff *skb;
9401 	void *ptr;
9402 	u32 i;
9403 
9404 	len = sizeof(*cmd) +
9405 	      sizeof(*tlv) +
9406 	      /* TLV place holder for array of structures
9407 	       * nlo_configured_params(nlo_list)
9408 	       */
9409 	      sizeof(*tlv);
9410 	      /* TLV place holder for array of uint32 channel_list */
9411 
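	/* Only the first network's channel list is encoded: the channel
	 * TLV below is sized and filled from pno->a_networks[0] alone.
	 */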
9412 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
9413 	len += channel_list_len;
9414 
9415 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
9416 	len += nlo_list_len;
9417 
9418 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9419 	if (!skb)
9420 		return ERR_PTR(-ENOMEM);
9421 
9422 	ptr = skb->data;
9423 	cmd = ptr;
9424 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
9425 
9426 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
9427 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
9428 
9429 	/* current FW does not support min-max range for dwell time */
9430 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
9431 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
9432 
9433 	if (pno->do_passive_scan)
9434 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
9435 
9436 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
9437 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
9438 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
9439 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
9440 
9441 	if (pno->enable_pno_scan_randomization) {
9442 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
9443 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
9444 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
9445 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
9446 	}
9447 
9448 	ptr += sizeof(*cmd);
9449 
9450 	/* nlo_configured_params(nlo_list) */
9451 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
9452 	tlv = ptr;
9453 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
9454 
9455 	ptr += sizeof(*tlv);
9456 	nlo_list = ptr;
9457 	for (i = 0; i < pno->uc_networks_count; i++) {
9458 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
9459 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
9460 						     sizeof(*nlo_list));
9461 
9462 		nlo_list[i].ssid.valid = cpu_to_le32(1);
9463 		nlo_list[i].ssid.ssid.ssid_len =
9464 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
9465 		memcpy(nlo_list[i].ssid.ssid.ssid,
9466 		       pno->a_networks[i].ssid.ssid,
9467 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
9468 
9469 		if (pno->a_networks[i].rssi_threshold &&
9470 		    pno->a_networks[i].rssi_threshold > -300) {
9471 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
9472 			nlo_list[i].rssi_cond.rssi =
9473 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
9474 		}
9475 
9476 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
9477 		nlo_list[i].bcast_nw_type.bcast_nw_type =
9478 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
9479 	}
9480 
9481 	ptr += nlo_list_len;
9482 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
9483 	tlv = ptr;
9484 	tlv->header =  ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
9485 	ptr += sizeof(*tlv);
9486 	channel_list = ptr;
9487 
9488 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
9489 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
9490 
9491 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
9492 		   vdev_id);
9493 
9494 	return skb;
9495 }
9496 
9497 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
9498 							 u32 vdev_id)
9499 {
9500 	struct wmi_wow_nlo_config_cmd *cmd;
9501 	struct sk_buff *skb;
9502 	size_t len;
9503 
9504 	len = sizeof(*cmd);
9505 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9506 	if (!skb)
9507 		return ERR_PTR(-ENOMEM);
9508 
9509 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
9510 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
9511 
9512 	cmd->vdev_id = cpu_to_le32(vdev_id);
9513 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
9514 
9515 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9516 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
9517 	return skb;
9518 }
9519 
9520 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
9521 			      struct wmi_pno_scan_req_arg  *pno_scan)
9522 {
9523 	struct sk_buff *skb;
9524 
9525 	if (pno_scan->enable)
9526 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
9527 	else
9528 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
9529 
9530 	if (IS_ERR_OR_NULL(skb))
9531 		return -ENOMEM;
9532 
9533 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
9534 }
9535 
9536 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
9537 				       struct wmi_arp_ns_offload_arg *offload,
9538 				       void **ptr,
9539 				       bool enable,
9540 				       bool ext)
9541 {
9542 	struct wmi_ns_offload_params *ns;
9543 	struct wmi_tlv *tlv;
9544 	void *buf_ptr = *ptr;
9545 	u32 ns_cnt, ns_ext_tuples;
9546 	int i, max_offloads;
9547 
9548 	ns_cnt = offload->ipv6_count;
9549 
9550 	tlv = buf_ptr;
9551 
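	/* The first NS tuple array is fixed at WMI_MAX_NS_OFFLOADS entries;
	 * any excess tuples travel in a second "extended" array that the
	 * caller appends after the ARP tuples (ext == true).
	 */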
9552 	if (ext) {
9553 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
9554 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9555 						 ns_ext_tuples * sizeof(*ns));
9556 		i = WMI_MAX_NS_OFFLOADS;
9557 		max_offloads = offload->ipv6_count;
9558 	} else {
9559 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9560 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
9561 		i = 0;
9562 		max_offloads = WMI_MAX_NS_OFFLOADS;
9563 	}
9564 
9565 	buf_ptr += sizeof(*tlv);
9566 
9567 	for (; i < max_offloads; i++) {
9568 		ns = buf_ptr;
9569 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
9570 							sizeof(*ns));
9571 
9572 		if (enable) {
9573 			if (i < ns_cnt)
9574 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
9575 
9576 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
9577 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
9578 
9579 			if (offload->ipv6_type[i])
9580 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
9581 
9582 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
9583 
9584 			if (!is_zero_ether_addr(ns->target_mac.addr))
9585 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
9586 
9587 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9588 				   "wmi index %d ns_solicited %pI6 target %pI6",
9589 				   "wmi index %d ns_solicited %pI6 target %pI6\n",
9590 				   ns->target_ipaddr[0]);
9591 		}
9592 
9593 		buf_ptr += sizeof(*ns);
9594 	}
9595 
9596 	*ptr = buf_ptr;
9597 }
9598 
9599 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
9600 					struct wmi_arp_ns_offload_arg *offload,
9601 					void **ptr,
9602 					bool enable)
9603 {
9604 	struct wmi_arp_offload_params *arp;
9605 	struct wmi_tlv *tlv;
9606 	void *buf_ptr = *ptr;
9607 	int i;
9608 
9609 	/* fill arp tuple */
9610 	tlv = buf_ptr;
9611 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9612 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
9613 	buf_ptr += sizeof(*tlv);
9614 
9615 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
9616 		arp = buf_ptr;
9617 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
9618 							 sizeof(*arp));
9619 
9620 		if (enable && i < offload->ipv4_count) {
9621 			/* Copy the target ip addr and flags */
9622 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
9623 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
9624 
9625 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4\n",
9626 				   arp->target_ipaddr);
9627 		}
9628 
9629 		buf_ptr += sizeof(*arp);
9630 	}
9631 
9632 	*ptr = buf_ptr;
9633 }
9634 
9635 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
9636 			      struct ath12k_link_vif *arvif,
9637 			      struct wmi_arp_ns_offload_arg *offload,
9638 			      bool enable)
9639 {
9640 	struct wmi_set_arp_ns_offload_cmd *cmd;
9641 	struct wmi_tlv *tlv;
9642 	struct sk_buff *skb;
9643 	void *buf_ptr;
9644 	size_t len;
9645 	u8 ns_cnt, ns_ext_tuples = 0;
9646 
9647 	ns_cnt = offload->ipv6_count;
9648 
9649 	len = sizeof(*cmd) +
9650 	      sizeof(*tlv) +
9651 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
9652 	      sizeof(*tlv) +
9653 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
9654 
9655 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
9656 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
9657 		len += sizeof(*tlv) +
9658 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
9659 	}
9660 
9661 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9662 	if (!skb)
9663 		return -ENOMEM;
9664 
9665 	buf_ptr = skb->data;
9666 	cmd = buf_ptr;
9667 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
9668 						 sizeof(*cmd));
9669 	cmd->flags = cpu_to_le32(0);
9670 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9671 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
9672 
9673 	buf_ptr += sizeof(*cmd);
9674 
9675 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
9676 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
9677 
9678 	if (ns_ext_tuples)
9679 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
9680 
9681 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
9682 }
9683 
9684 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
9685 				 struct ath12k_link_vif *arvif, bool enable)
9686 {
9687 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
9688 	struct wmi_gtk_rekey_offload_cmd *cmd;
9689 	struct sk_buff *skb;
9690 	__le64 replay_ctr;
9691 	int len;
9692 
9693 	len = sizeof(*cmd);
9694 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9695 	if (!skb)
9696 		return -ENOMEM;
9697 
9698 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9699 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9700 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9701 
9702 	if (enable) {
9703 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
9704 
9705 		/* the lengths in rekey_data and cmd are equal */
9706 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
9707 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
9708 
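		/* The firmware takes the counter little-endian here; the
		 * big-endian form is only produced when notifying the
		 * supplicant (see ath12k_wmi_gtk_offload_status_event()).
		 */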
9709 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
9710 		memcpy(cmd->replay_ctr, &replay_ctr,
9711 		       sizeof(replay_ctr));
9712 	} else {
9713 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
9714 	}
9715 
9716 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
9717 		   arvif->vdev_id, enable);
9718 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9719 }
9720 
9721 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
9722 				 struct ath12k_link_vif *arvif)
9723 {
9724 	struct wmi_gtk_rekey_offload_cmd *cmd;
9725 	struct sk_buff *skb;
9726 	int len;
9727 
9728 	len = sizeof(*cmd);
9729 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9730 	if (!skb)
9731 		return -ENOMEM;
9732 
9733 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9734 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9735 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9736 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
9737 
9738 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
9739 		   arvif->vdev_id);
9740 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9741 }
9742 
9743 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
9744 			     const struct wmi_sta_keepalive_arg *arg)
9745 {
9746 	struct wmi_sta_keepalive_arp_resp_params *arp;
9747 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9748 	struct wmi_sta_keepalive_cmd *cmd;
9749 	struct sk_buff *skb;
9750 	size_t len;
9751 
9752 	len = sizeof(*cmd) + sizeof(*arp);
9753 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9754 	if (!skb)
9755 		return -ENOMEM;
9756 
9757 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
9758 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
9759 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9760 	cmd->enabled = cpu_to_le32(arg->enabled);
9761 	cmd->interval = cpu_to_le32(arg->interval);
9762 	cmd->method = cpu_to_le32(arg->method);
9763 
9764 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
9765 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
9766 						 sizeof(*arp));
9767 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
9768 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
9769 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
9770 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
9771 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
9772 	}
9773 
9774 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9775 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
9776 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
9777 
9778 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
9779 }
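
/*
 * Illustrative sketch only (compiled out): a minimal caller of
 * ath12k_wmi_sta_keepalive(). The 30 second interval and the NULL-frame
 * method are assumptions for the example.
 */
#if 0
static int ath12k_example_sta_keepalive(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_sta_keepalive_arg arg = {
		.vdev_id = vdev_id,
		.enabled = 1,
		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
		.interval = 30,
	};

	return ath12k_wmi_sta_keepalive(ar, &arg);
}
#endif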
9780 
9781 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
9782 {
9783 	struct wmi_mlo_setup_cmd *cmd;
9784 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9785 	u32 *partner_links, num_links;
9786 	int i, ret, buf_len, arg_len;
9787 	struct sk_buff *skb;
9788 	struct wmi_tlv *tlv;
9789 	void *ptr;
9790 
9791 	num_links = mlo_params->num_partner_links;
9792 	arg_len = num_links * sizeof(u32);
9793 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
9794 
9795 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9796 	if (!skb)
9797 		return -ENOMEM;
9798 
9799 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
9800 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
9801 						 sizeof(*cmd));
9802 	cmd->mld_group_id = mlo_params->group_id;
9803 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9804 	ptr = skb->data + sizeof(*cmd);
9805 
9806 	tlv = ptr;
9807 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9808 	ptr += TLV_HDR_SIZE;
9809 
9810 	partner_links = ptr;
9811 	for (i = 0; i < num_links; i++)
9812 		partner_links[i] = mlo_params->partner_link_id[i];
9813 
9814 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
9815 	if (ret) {
9816 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
9817 			    ret);
9818 		dev_kfree_skb(skb);
9819 		return ret;
9820 	}
9821 
9822 	return 0;
9823 }
9824 
9825 int ath12k_wmi_mlo_ready(struct ath12k *ar)
9826 {
9827 	struct wmi_mlo_ready_cmd *cmd;
9828 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9829 	struct sk_buff *skb;
9830 	int ret, len;
9831 
9832 	len = sizeof(*cmd);
9833 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9834 	if (!skb)
9835 		return -ENOMEM;
9836 
9837 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
9838 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
9839 						 sizeof(*cmd));
9840 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9841 
9842 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
9843 	if (ret) {
9844 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
9845 			    ret);
9846 		dev_kfree_skb(skb);
9847 		return ret;
9848 	}
9849 
9850 	return 0;
9851 }
9852 
9853 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
9854 {
9855 	struct wmi_mlo_teardown_cmd *cmd;
9856 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9857 	struct sk_buff *skb;
9858 	int ret, len;
9859 
9860 	len = sizeof(*cmd);
9861 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9862 	if (!skb)
9863 		return -ENOMEM;
9864 
9865 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
9866 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
9867 						 sizeof(*cmd));
9868 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9869 	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
9870 
9871 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
9872 	if (ret) {
9873 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
9874 			    ret);
9875 		dev_kfree_skb(skb);
9876 		return ret;
9877 	}
9878 
9879 	return 0;
9880 }
9881