xref: /linux/drivers/net/wireless/ath/ath12k/wmi.c (revision 979c5ce4a37680063d87fe13d662ed68e06e77c3)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include "core.h"
18 #include "debugfs.h"
19 #include "debug.h"
20 #include "mac.h"
21 #include "hw.h"
22 #include "peer.h"
23 #include "p2p.h"
24 #include "testmode.h"
25 
26 struct ath12k_wmi_svc_ready_parse {
27 	bool wmi_svc_bitmap_done;
28 };
29 
30 struct wmi_tlv_fw_stats_parse {
31 	const struct wmi_stats_event *ev;
32 	struct ath12k_fw_stats *stats;
33 };
34 
35 struct ath12k_wmi_dma_ring_caps_parse {
36 	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
37 	u32 n_dma_ring_caps;
38 };
39 
40 struct ath12k_wmi_service_ext_arg {
41 	u32 default_conc_scan_config_bits;
42 	u32 default_fw_config_bits;
43 	struct ath12k_wmi_ppe_threshold_arg ppet;
44 	u32 he_cap_info;
45 	u32 mpdu_density;
46 	u32 max_bssid_rx_filters;
47 	u32 num_hw_modes;
48 	u32 num_phy;
49 };
50 
51 struct ath12k_wmi_svc_rdy_ext_parse {
52 	struct ath12k_wmi_service_ext_arg arg;
53 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
54 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
55 	u32 n_hw_mode_caps;
56 	u32 tot_phy_id;
57 	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
58 	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
59 	u32 n_mac_phy_caps;
60 	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
61 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
62 	u32 n_ext_hal_reg_caps;
63 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
64 	bool hw_mode_done;
65 	bool mac_phy_done;
66 	bool ext_hal_reg_done;
67 	bool mac_phy_chainmask_combo_done;
68 	bool mac_phy_chainmask_cap_done;
69 	bool oem_dma_ring_cap_done;
70 	bool dma_ring_cap_done;
71 };
72 
73 struct ath12k_wmi_svc_rdy_ext2_arg {
74 	u32 reg_db_version;
75 	u32 hw_min_max_tx_power_2ghz;
76 	u32 hw_min_max_tx_power_5ghz;
77 	u32 chwidth_num_peer_caps;
78 	u32 preamble_puncture_bw;
79 	u32 max_user_per_ppdu_ofdma;
80 	u32 max_user_per_ppdu_mumimo;
81 	u32 target_cap_flags;
82 	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
83 	u32 max_num_linkview_peers;
84 	u32 max_num_msduq_supported_per_tid;
85 	u32 default_num_msduq_supported_per_tid;
86 };
87 
88 struct ath12k_wmi_svc_rdy_ext2_parse {
89 	struct ath12k_wmi_svc_rdy_ext2_arg arg;
90 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
91 	bool dma_ring_cap_done;
92 	bool spectral_bin_scaling_done;
93 	bool mac_phy_caps_ext_done;
94 	bool hal_reg_caps_ext2_done;
95 	bool scan_radio_caps_ext2_done;
96 	bool twt_caps_done;
97 	bool htt_msdu_idx_to_qtype_map_done;
98 	bool dbs_or_sbs_cap_ext_done;
99 };
100 
101 struct ath12k_wmi_rdy_parse {
102 	u32 num_extra_mac_addr;
103 };
104 
105 struct ath12k_wmi_dma_buf_release_arg {
106 	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
107 	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
108 	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
109 	u32 num_buf_entry;
110 	u32 num_meta;
111 	bool buf_entry_done;
112 	bool meta_data_done;
113 };
114 
115 struct ath12k_wmi_tlv_policy {
116 	size_t min_len;
117 };
118 
119 struct wmi_tlv_mgmt_rx_parse {
120 	const struct ath12k_wmi_mgmt_rx_params *fixed;
121 	const u8 *frame_buf;
122 	bool frame_buf_done;
123 };
124 
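/*
 * Minimum payload lengths enforced by ath12k_wmi_tlv_iter(); tags without
 * an entry here (or with a min_len of 0) are not length-checked.
 */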
125 static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
126 	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
127 	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
128 	[WMI_TAG_SERVICE_READY_EVENT] = {
129 		.min_len = sizeof(struct wmi_service_ready_event) },
130 	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
131 		.min_len = sizeof(struct wmi_service_ready_ext_event) },
132 	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
133 		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
134 	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
135 		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
136 	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
137 		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
138 	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
139 		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
140 	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
141 		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
142 	[WMI_TAG_VDEV_STOPPED_EVENT] = {
143 		.min_len = sizeof(struct wmi_vdev_stopped_event) },
144 	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
145 		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
146 	[WMI_TAG_MGMT_RX_HDR] = {
147 		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
148 	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
149 		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
150 	[WMI_TAG_SCAN_EVENT] = {
151 		.min_len = sizeof(struct wmi_scan_event) },
152 	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
153 		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
154 	[WMI_TAG_ROAM_EVENT] = {
155 		.min_len = sizeof(struct wmi_roam_event) },
156 	[WMI_TAG_CHAN_INFO_EVENT] = {
157 		.min_len = sizeof(struct wmi_chan_info_event) },
158 	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
159 		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
160 	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
161 		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
162 	[WMI_TAG_READY_EVENT] = {
163 		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
164 	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
165 		.min_len = sizeof(struct wmi_service_available_event) },
166 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
167 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
168 	[WMI_TAG_RFKILL_EVENT] = {
169 		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
170 	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
171 		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
172 	[WMI_TAG_HOST_SWFDA_EVENT] = {
173 		.min_len = sizeof(struct wmi_fils_discovery_event) },
174 	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
175 		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
176 	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
177 		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
178 	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
179 		.min_len = sizeof(struct wmi_twt_enable_event) },
180 	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
181 		.min_len = sizeof(struct wmi_twt_disable_event) },
182 	[WMI_TAG_P2P_NOA_INFO] = {
183 		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
184 	[WMI_TAG_P2P_NOA_EVENT] = {
185 		.min_len = sizeof(struct wmi_p2p_noa_event) },
186 	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
187 		.min_len = sizeof(struct wmi_11d_new_cc_event) },
188 };
189 
190 __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
191 {
192 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
193 		le32_encode_bits(len, WMI_TLV_LEN);
194 }
195 
196 static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
197 {
198 	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
199 }
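
/*
 * Illustration (not part of the driver): a TLV header is a single 32-bit
 * word carrying the tag in WMI_TLV_TAG and the length of the value,
 * excluding the header itself, in WMI_TLV_LEN. Command builders pass the
 * full structure size to ath12k_wmi_tlv_cmd_hdr(), which strips
 * TLV_HDR_SIZE, e.g.:
 *
 *   cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
 *                                            sizeof(*cmd));
 *   // encodes tag = WMI_TAG_VDEV_CREATE_CMD,
 *   //         len = sizeof(*cmd) - TLV_HDR_SIZE
 */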
200 
201 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
202 			     struct ath12k_wmi_resource_config_arg *config)
203 {
204 	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
205 	config->num_peers = ab->num_radios *
206 		ath12k_core_get_max_peers_per_radio(ab);
207 	config->num_tids = ath12k_core_get_max_num_tids(ab);
208 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
209 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
210 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
211 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
212 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
213 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
214 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
215 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
216 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
217 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
218 
219 	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
220 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
221 	else
222 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
223 
224 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
225 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
226 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
227 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
228 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
229 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
230 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
231 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
232 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
233 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
234 	config->rx_skip_defrag_timeout_dup_detection_check =
235 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
236 	config->vow_config = TARGET_VOW_CONFIG;
237 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
238 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
239 	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
240 	config->rx_batchmode = TARGET_RX_BATCHMODE;
241 	/* Indicates that the host supports peer map v3 and unmap v2 */
242 	config->peer_map_unmap_version = 0x32;
243 	config->twt_ap_pdev_count = ab->num_radios;
244 	config->twt_ap_sta_count = 1000;
245 	config->ema_max_vap_cnt = ab->num_radios;
246 	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
247 	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
248 
249 	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
250 		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
251 }
252 
253 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
254 			     struct ath12k_wmi_resource_config_arg *config)
255 {
256 	config->num_vdevs = 4;
257 	config->num_peers = 16;
258 	config->num_tids = 32;
259 
260 	config->num_offload_peers = 3;
261 	config->num_offload_reorder_buffs = 3;
262 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
263 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
264 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
265 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
266 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
267 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
268 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
269 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
270 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
271 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
272 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
273 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
274 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
275 	config->num_mcast_groups = 0;
276 	config->num_mcast_table_elems = 0;
277 	config->mcast2ucast_mode = 0;
278 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
279 	config->num_wds_entries = 0;
280 	config->dma_burst_size = 0;
281 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
282 	config->vow_config = TARGET_VOW_CONFIG;
283 	config->gtk_offload_max_vdev = 2;
284 	config->num_msdu_desc = 0x400;
285 	config->beacon_tx_offload_max_vdev = 2;
286 	config->rx_batchmode = TARGET_RX_BATCHMODE;
287 
288 	config->peer_map_unmap_version = 0x1;
289 	config->use_pdev_id = 1;
290 	config->max_frag_entries = 0xa;
291 	config->num_tdls_vdevs = 0x1;
292 	config->num_tdls_conn_table_entries = 8;
293 	config->beacon_tx_offload_max_vdev = 0x2;
294 	config->num_multicast_filter_entries = 0x20;
295 	config->num_wow_filters = 0x16;
296 	config->num_keep_alive_pattern = 0;
297 }
298 
299 #define PRIMAP(_hw_mode_) \
300 	[_hw_mode_] = _hw_mode_##_PRI
301 
302 static const int ath12k_hw_mode_pri_map[] = {
303 	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
304 	PRIMAP(WMI_HOST_HW_MODE_DBS),
305 	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
306 	PRIMAP(WMI_HOST_HW_MODE_SBS),
307 	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
308 	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
309 	/* keep last */
310 	PRIMAP(WMI_HOST_HW_MODE_MAX),
311 };
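
/*
 * For illustration, PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *
 *   [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,
 *
 * so the array maps each hw mode enum value to its priority constant.
 */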
312 
313 static int
314 ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
315 		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
316 				const void *ptr, void *data),
317 		    void *data)
318 {
319 	const void *begin = ptr;
320 	const struct wmi_tlv *tlv;
321 	u16 tlv_tag, tlv_len;
322 	int ret;
323 
324 	while (len > 0) {
325 		if (len < sizeof(*tlv)) {
326 			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
327 				   ptr - begin, len, sizeof(*tlv));
328 			return -EINVAL;
329 		}
330 
331 		tlv = ptr;
332 		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
333 		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
334 		ptr += sizeof(*tlv);
335 		len -= sizeof(*tlv);
336 
337 		if (tlv_len > len) {
338 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
339 				   tlv_tag, ptr - begin, len, tlv_len);
340 			return -EINVAL;
341 		}
342 
343 		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
344 		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
345 		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
346 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
347 				   tlv_tag, ptr - begin, tlv_len,
348 				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
349 			return -EINVAL;
350 		}
351 
352 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
353 		if (ret)
354 			return ret;
355 
356 		ptr += tlv_len;
357 		len -= tlv_len;
358 	}
359 
360 	return 0;
361 }
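
/*
 * Usage sketch with a hypothetical callback (for illustration only; the
 * real callbacks live below, e.g. ath12k_wmi_svc_rdy_parse()):
 *
 *   static int dump_tlv(struct ath12k_base *ab, u16 tag, u16 len,
 *                       const void *ptr, void *data)
 *   {
 *           ath12k_dbg(ab, ATH12K_DBG_WMI, "tlv tag %u len %u\n", tag, len);
 *           return 0;    // a non-zero return aborts the walk
 *   }
 *
 *   ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, dump_tlv, NULL);
 */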
362 
363 static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
364 				     const void *ptr, void *data)
365 {
366 	const void **tb = data;
367 
368 	if (tag < WMI_TAG_MAX)
369 		tb[tag] = ptr;
370 
371 	return 0;
372 }
373 
374 static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
375 				const void *ptr, size_t len)
376 {
377 	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
378 				   (void *)tb);
379 }
380 
381 static const void **
382 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
383 			   struct sk_buff *skb, gfp_t gfp)
384 {
385 	const void **tb;
386 	int ret;
387 
388 	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
389 	if (!tb)
390 		return ERR_PTR(-ENOMEM);
391 
392 	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
393 	if (ret) {
394 		kfree(tb);
395 		return ERR_PTR(ret);
396 	}
397 
398 	return tb;
399 }
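
/*
 * Typical usage (sketch): the returned table is indexed by WMI tag and is
 * owned by the caller:
 *
 *   const void **tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *   if (IS_ERR(tb))
 *           return PTR_ERR(tb);
 *   ev = tb[WMI_TAG_READY_EVENT];    // NULL if the tag was absent
 *   ...
 *   kfree(tb);
 */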
400 
401 static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
402 				      u32 cmd_id)
403 {
404 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
405 	struct ath12k_base *ab = wmi->wmi_ab->ab;
406 	struct wmi_cmd_hdr *cmd_hdr;
407 	int ret;
408 
409 	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
410 		return -ENOMEM;
411 
412 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
413 	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
414 
415 	memset(skb_cb, 0, sizeof(*skb_cb));
416 	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
417 
418 	if (ret)
419 		goto err_pull;
420 
421 	return 0;
422 
423 err_pull:
424 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
425 	return ret;
426 }
427 
428 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
429 			u32 cmd_id)
430 {
431 	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
432 	int ret = -EOPNOTSUPP;
433 
434 	might_sleep();
435 
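	/* The condition expression below both attempts the send and decides
	 * whether to keep waiting: ath12k_wmi_cmd_send_nowait() returns
	 * -EAGAIN when HTC is out of tx credits, in which case we sleep on
	 * tx_credits_wq (woken from the HTC credit-report path) and retry
	 * until the send succeeds, fails for another reason, or
	 * WMI_SEND_TIMEOUT_HZ expires.
	 */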
436 	wait_event_timeout(wmi_ab->tx_credits_wq, ({
437 		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
438 
439 		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
440 			ret = -ESHUTDOWN;
441 
442 		(ret != -EAGAIN);
443 	}), WMI_SEND_TIMEOUT_HZ);
444 
445 	if (ret == -EAGAIN)
446 		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);
447 
448 	return ret;
449 }
450 
451 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
452 				     const void *ptr,
453 				     struct ath12k_wmi_service_ext_arg *arg)
454 {
455 	const struct wmi_service_ready_ext_event *ev = ptr;
456 	int i;
457 
458 	if (!ev)
459 		return -EINVAL;
460 
461 	/* Move this to a host-based bitmap */
462 	arg->default_conc_scan_config_bits =
463 		le32_to_cpu(ev->default_conc_scan_config_bits);
464 	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
465 	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
466 	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
467 	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
468 	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
469 	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
470 
471 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
472 		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
473 			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
474 
475 	return 0;
476 }
477 
478 static int
479 ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
480 				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
481 				      u8 hw_mode_id, u8 phy_id,
482 				      struct ath12k_pdev *pdev)
483 {
484 	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
485 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
486 	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
487 	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
488 	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
489 	struct ath12k_band_cap *cap_band;
490 	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
491 	struct ath12k_fw_pdev *fw_pdev;
492 	u32 phy_map;
493 	u32 hw_idx, phy_idx = 0;
494 	int i;
495 
496 	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
497 		return -EINVAL;
498 
499 	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
500 		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
501 			break;
502 
503 		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
504 		phy_idx = fls(phy_map);
505 	}
506 
507 	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
508 		return -EINVAL;
509 
510 	phy_idx += phy_id;
511 	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
512 		return -EINVAL;
513 
514 	mac_caps = wmi_mac_phy_caps + phy_idx;
515 
516 	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
517 	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
518 	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
519 	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
520 
521 	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
522 	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
523 	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
524 	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
525 	ab->fw_pdev_count++;
526 
527 	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
528 	 * from band to band for a single radio, it remains to be seen how
529 	 * this should be handled.
530 	 */
531 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
532 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
533 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
534 	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
535 		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
536 		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
537 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
538 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
539 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
540 		pdev_cap->nss_ratio_enabled =
541 			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
542 		pdev_cap->nss_ratio_info =
543 			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
544 	} else {
545 		return -EINVAL;
546 	}
547 
548 	/* The tx/rx chainmask reported by firmware depends on the actual hw chains
549 	 * used. For example, for 4x4-capable macphys, the first 4 chains can be used
550 	 * for the first mac and the remaining 4 chains for the second mac, or
551 	 * vice versa. In that case a tx/rx chainmask of 0xf is advertised for the
552 	 * first mac and 0xf0 for the second, or vice versa. Compute the shift value
553 	 * for the tx/rx chainmask, which is used to advertise the supported ht/vht
554 	 * rates to mac80211.
555 	 */
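	/* For instance, the second mac in the example above, advertising a
	 * tx_chain_mask of 0xf0, ends up with tx_chain_mask_shift = 4.
	 */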
556 	pdev_cap->tx_chain_mask_shift =
557 			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
558 	pdev_cap->rx_chain_mask_shift =
559 			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
560 
561 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
562 		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
563 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
564 		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
565 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
566 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
567 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
568 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
569 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
570 			cap_band->he_cap_phy_info[i] =
571 				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
572 
573 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
574 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
575 
576 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
577 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
578 				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
579 	}
580 
581 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
582 		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
583 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
584 		cap_band->max_bw_supported =
585 			le32_to_cpu(mac_caps->max_bw_supported_5g);
586 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
587 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
588 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
589 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
590 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
591 			cap_band->he_cap_phy_info[i] =
592 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
593 
594 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
595 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
596 
597 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
598 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
599 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
600 
601 		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
602 		cap_band->max_bw_supported =
603 			le32_to_cpu(mac_caps->max_bw_supported_5g);
604 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
605 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
606 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
607 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
608 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
609 			cap_band->he_cap_phy_info[i] =
610 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
611 
612 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
613 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
614 
615 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
616 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
617 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
618 	}
619 
620 	return 0;
621 }
622 
623 static int
624 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
625 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
626 				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
627 				u8 phy_idx,
628 				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
629 {
630 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
631 
632 	if (!reg_caps || !ext_caps)
633 		return -EINVAL;
634 
635 	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
636 		return -EINVAL;
637 
638 	ext_reg_cap = &ext_caps[phy_idx];
639 
640 	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
641 	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
642 	param->eeprom_reg_domain_ext =
643 		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
644 	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
645 	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
646 	/* check if param->wireless_mode is needed */
647 	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
648 	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
649 	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
650 	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
651 
652 	return 0;
653 }
654 
655 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
656 					 const void *evt_buf,
657 					 struct ath12k_wmi_target_cap_arg *cap)
658 {
659 	const struct wmi_service_ready_event *ev = evt_buf;
660 
661 	if (!ev) {
662 		ath12k_err(ab, "%s: failed due to NULL param\n",
663 			   __func__);
664 		return -EINVAL;
665 	}
666 
667 	cap->phy_capability = le32_to_cpu(ev->phy_capability);
668 	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
669 	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
670 	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
671 	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
672 	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
673 	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
674 	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
675 	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
676 	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
677 	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
678 	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
679 	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
680 	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
681 	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
682 	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
683 	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
684 
685 	return 0;
686 }
687 
688 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in the
689  * wmi service ready event are advertised in b0-b3 (the 4 LSBs) of each
690  * 4-byte word.
691  */
692 static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
693 					   const u32 *wmi_svc_bm)
694 {
695 	int i, j;
696 
697 	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
698 		do {
699 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
700 				set_bit(j, wmi->wmi_ab->svc_map);
701 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
702 	}
703 }
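
/*
 * Worked example (illustrative): with 4 service bits per 32-bit word,
 * word i of wmi_svc_bm covers services 4*i .. 4*i + 3 in its bits 0-3,
 * so a word value of 0x5 sets services 4*i and 4*i + 2 in svc_map.
 */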
704 
705 static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
706 				    const void *ptr, void *data)
707 {
708 	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
709 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
710 	u16 expect_len;
711 
712 	switch (tag) {
713 	case WMI_TAG_SERVICE_READY_EVENT:
714 		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
715 			return -EINVAL;
716 		break;
717 
718 	case WMI_TAG_ARRAY_UINT32:
719 		if (!svc_ready->wmi_svc_bitmap_done) {
720 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
721 			if (len < expect_len) {
722 				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
723 					    len, tag);
724 				return -EINVAL;
725 			}
726 
727 			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
728 
729 			svc_ready->wmi_svc_bitmap_done = true;
730 		}
731 		break;
732 	default:
733 		break;
734 	}
735 
736 	return 0;
737 }
738 
739 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
740 {
741 	struct ath12k_wmi_svc_ready_parse svc_ready = { };
742 	int ret;
743 
744 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
745 				  ath12k_wmi_svc_rdy_parse,
746 				  &svc_ready);
747 	if (ret) {
748 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
749 		return ret;
750 	}
751 
752 	return 0;
753 }
754 
755 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
756 				    struct ieee80211_tx_info *info)
757 {
758 	struct ath12k_base *ab = ar->ab;
759 	u32 freq = 0;
760 
761 	if (ab->hw_params->single_pdev_only &&
762 	    ar->scan.is_roc &&
763 	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
764 		freq = ar->scan.roc_freq;
765 
766 	return freq;
767 }
768 
769 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
770 {
771 	struct sk_buff *skb;
772 	struct ath12k_base *ab = wmi_ab->ab;
773 	u32 round_len = roundup(len, 4);
774 
775 	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
776 	if (!skb)
777 		return NULL;
778 
779 	skb_reserve(skb, WMI_SKB_HEADROOM);
780 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
781 		ath12k_warn(ab, "unaligned WMI skb data\n");
782 
783 	skb_put(skb, round_len);
784 	memset(skb->data, 0, round_len);
785 
786 	return skb;
787 }
788 
789 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
790 			 struct sk_buff *frame)
791 {
792 	struct ath12k_wmi_pdev *wmi = ar->wmi;
793 	struct wmi_mgmt_send_cmd *cmd;
794 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
795 	struct wmi_tlv *frame_tlv;
796 	struct sk_buff *skb;
797 	u32 buf_len;
798 	int ret, len;
799 
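	/* Only the first WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are
	 * copied inline into the command buffer; the firmware can fetch the
	 * full frame (cmd->frame_len bytes) via the DMA address passed in
	 * paddr_lo/paddr_hi.
	 */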
800 	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
801 
802 	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
803 
804 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
805 	if (!skb)
806 		return -ENOMEM;
807 
808 	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
809 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
810 						 sizeof(*cmd));
811 	cmd->vdev_id = cpu_to_le32(vdev_id);
812 	cmd->desc_id = cpu_to_le32(buf_id);
813 	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
814 	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
815 	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
816 	cmd->frame_len = cpu_to_le32(frame->len);
817 	cmd->buf_len = cpu_to_le32(buf_len);
818 	cmd->tx_params_valid = 0;
819 
820 	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
821 	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
822 
823 	memcpy(frame_tlv->value, frame->data, buf_len);
824 
825 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
826 	if (ret) {
827 		ath12k_warn(ar->ab,
828 			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
829 		dev_kfree_skb(skb);
830 	}
831 
832 	return ret;
833 }
834 
835 int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
836 				      u32 vdev_id, u32 pdev_id)
837 {
838 	struct ath12k_wmi_pdev *wmi = ar->wmi;
839 	struct wmi_request_stats_cmd *cmd;
840 	struct sk_buff *skb;
841 	int ret;
842 
843 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
844 	if (!skb)
845 		return -ENOMEM;
846 
847 	cmd = (struct wmi_request_stats_cmd *)skb->data;
848 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
849 						 sizeof(*cmd));
850 
851 	cmd->stats_id = cpu_to_le32(stats_id);
852 	cmd->vdev_id = cpu_to_le32(vdev_id);
853 	cmd->pdev_id = cpu_to_le32(pdev_id);
854 
855 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
856 	if (ret) {
857 		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
858 		dev_kfree_skb(skb);
859 	}
860 
861 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
862 		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
863 		   stats_id, vdev_id, pdev_id);
864 
865 	return ret;
866 }
867 
868 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
869 			   struct ath12k_wmi_vdev_create_arg *args)
870 {
871 	struct ath12k_wmi_pdev *wmi = ar->wmi;
872 	struct wmi_vdev_create_cmd *cmd;
873 	struct sk_buff *skb;
874 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
875 	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
876 	struct wmi_vdev_create_mlo_params *ml_params;
877 	struct wmi_tlv *tlv;
878 	int ret, len;
879 	void *ptr;
880 
881 	/* This can be optimized by sending the tx/rx chain configuration
882 	 * only for the supported bands instead of always sending it for
883 	 * both bands.
884 	 */
885 	len = sizeof(*cmd) + TLV_HDR_SIZE +
886 		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
887 		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
888 
889 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
890 	if (!skb)
891 		return -ENOMEM;
892 
893 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
894 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
895 						 sizeof(*cmd));
896 
897 	cmd->vdev_id = cpu_to_le32(args->if_id);
898 	cmd->vdev_type = cpu_to_le32(args->type);
899 	cmd->vdev_subtype = cpu_to_le32(args->subtype);
900 	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
901 	cmd->pdev_id = cpu_to_le32(args->pdev_id);
902 	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
903 	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
904 	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
905 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
906 
907 	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
908 		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
909 
910 	ptr = skb->data + sizeof(*cmd);
911 	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
912 
913 	tlv = ptr;
914 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
915 
916 	ptr += TLV_HDR_SIZE;
917 	txrx_streams = ptr;
918 	len = sizeof(*txrx_streams);
919 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
920 							  len);
921 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
922 	txrx_streams->supported_tx_streams =
923 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
924 	txrx_streams->supported_rx_streams =
925 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
926 
927 	txrx_streams++;
928 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
929 							  len);
930 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
931 	txrx_streams->supported_tx_streams =
932 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
933 	txrx_streams->supported_rx_streams =
934 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
935 
936 	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
937 
938 	if (is_ml_vdev) {
939 		tlv = ptr;
940 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
941 						 sizeof(*ml_params));
942 		ptr += TLV_HDR_SIZE;
943 		ml_params = ptr;
944 
945 		ml_params->tlv_header =
946 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
947 					       sizeof(*ml_params));
948 		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
949 	}
950 
951 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
952 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
953 		   args->if_id, args->type, args->subtype,
954 		   macaddr, args->pdev_id);
955 
956 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
957 	if (ret) {
958 		ath12k_warn(ar->ab,
959 			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
960 		dev_kfree_skb(skb);
961 	}
962 
963 	return ret;
964 }
965 
966 int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
967 {
968 	struct ath12k_wmi_pdev *wmi = ar->wmi;
969 	struct wmi_vdev_delete_cmd *cmd;
970 	struct sk_buff *skb;
971 	int ret;
972 
973 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
974 	if (!skb)
975 		return -ENOMEM;
976 
977 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
978 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
979 						 sizeof(*cmd));
980 	cmd->vdev_id = cpu_to_le32(vdev_id);
981 
982 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
983 
984 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
985 	if (ret) {
986 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
987 		dev_kfree_skb(skb);
988 	}
989 
990 	return ret;
991 }
992 
993 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
994 {
995 	struct ath12k_wmi_pdev *wmi = ar->wmi;
996 	struct wmi_vdev_stop_cmd *cmd;
997 	struct sk_buff *skb;
998 	int ret;
999 
1000 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1001 	if (!skb)
1002 		return -ENOMEM;
1003 
1004 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
1005 
1006 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
1007 						 sizeof(*cmd));
1008 	cmd->vdev_id = cpu_to_le32(vdev_id);
1009 
1010 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
1011 
1012 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
1013 	if (ret) {
1014 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
1015 		dev_kfree_skb(skb);
1016 	}
1017 
1018 	return ret;
1019 }
1020 
1021 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
1022 {
1023 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1024 	struct wmi_vdev_down_cmd *cmd;
1025 	struct sk_buff *skb;
1026 	int ret;
1027 
1028 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1029 	if (!skb)
1030 		return -ENOMEM;
1031 
1032 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
1033 
1034 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
1035 						 sizeof(*cmd));
1036 	cmd->vdev_id = cpu_to_le32(vdev_id);
1037 
1038 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
1039 
1040 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
1041 	if (ret) {
1042 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
1043 		dev_kfree_skb(skb);
1044 	}
1045 
1046 	return ret;
1047 }
1048 
1049 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
1050 				       struct wmi_vdev_start_req_arg *arg)
1051 {
1052 	u32 center_freq1 = arg->band_center_freq1;
1053 
1054 	memset(chan, 0, sizeof(*chan));
1055 
1056 	chan->mhz = cpu_to_le32(arg->freq);
1057 	chan->band_center_freq1 = cpu_to_le32(center_freq1);
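	/* For the 160/320 MHz modes below, the firmware apparently expects
	 * band_center_freq1 to point at the center of the primary 80/160 MHz
	 * segment and band_center_freq2 at the center of the full channel,
	 * hence the +/-40 and +/-80 MHz adjustments.
	 */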
1058 	if (arg->mode == MODE_11BE_EHT320) {
1059 		if (arg->freq > center_freq1)
1060 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
1061 		else
1062 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
1063 
1064 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1065 
1066 	} else if (arg->mode == MODE_11BE_EHT160 ||
1067 		   arg->mode == MODE_11AX_HE160) {
1068 		if (arg->freq > center_freq1)
1069 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
1070 		else
1071 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
1072 
1073 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1074 	} else {
1075 		chan->band_center_freq2 = 0;
1076 	}
1077 
1078 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
1079 	if (arg->passive)
1080 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
1081 	if (arg->allow_ibss)
1082 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
1083 	if (arg->allow_ht)
1084 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
1085 	if (arg->allow_vht)
1086 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
1087 	if (arg->allow_he)
1088 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
1089 	if (arg->ht40plus)
1090 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
1091 	if (arg->chan_radar)
1092 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
1093 	if (arg->freq2_radar)
1094 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
1095 
1096 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
1097 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
1098 		le32_encode_bits(arg->max_reg_power,
1099 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
1100 
1101 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
1102 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
1103 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
1104 }
1105 
1106 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
1107 			  bool restart)
1108 {
1109 	struct wmi_vdev_start_mlo_params *ml_params;
1110 	struct wmi_partner_link_info *partner_info;
1111 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1112 	struct wmi_vdev_start_request_cmd *cmd;
1113 	struct sk_buff *skb;
1114 	struct ath12k_wmi_channel_params *chan;
1115 	struct wmi_tlv *tlv;
1116 	void *ptr;
1117 	int ret, len, i, ml_arg_size = 0;
1118 
1119 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1120 		return -EINVAL;
1121 
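	/* Buffer layout built below (sketch):
	 *   [cmd][chan][empty WMI_TAG_ARRAY_STRUCT for NoA descriptors]
	 *   [optional: MLO params TLV + partner link array]
	 */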
1122 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
1123 
1124 	if (!restart && arg->ml.enabled) {
1125 		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
1126 			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
1127 					      sizeof(*partner_info));
1128 		len += ml_arg_size;
1129 	}
1130 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1131 	if (!skb)
1132 		return -ENOMEM;
1133 
1134 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1135 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
1136 						 sizeof(*cmd));
1137 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1138 	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
1139 	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
1140 	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
1141 	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
1142 	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
1143 	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
1144 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
1145 	cmd->regdomain = cpu_to_le32(arg->regdomain);
1146 	cmd->he_ops = cpu_to_le32(arg->he_ops);
1147 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
1148 	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
1149 	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
1150 
1151 	if (!restart) {
1152 		if (arg->ssid) {
1153 			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
1154 			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1155 		}
1156 		if (arg->hidden_ssid)
1157 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
1158 		if (arg->pmf_enabled)
1159 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
1160 	}
1161 
1162 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
1163 
1164 	ptr = skb->data + sizeof(*cmd);
1165 	chan = ptr;
1166 
1167 	ath12k_wmi_put_wmi_channel(chan, arg);
1168 
1169 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
1170 						  sizeof(*chan));
1171 	ptr += sizeof(*chan);
1172 
1173 	tlv = ptr;
1174 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
1175 
1176 	/* Note: This is a nested TLV containing:
1177 	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]...
1178 	 */
1179 
1180 	ptr += sizeof(*tlv);
1181 
1182 	if (ml_arg_size) {
1183 		tlv = ptr;
1184 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1185 						 sizeof(*ml_params));
1186 		ptr += TLV_HDR_SIZE;
1187 
1188 		ml_params = ptr;
1189 
1190 		ml_params->tlv_header =
1191 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
1192 					       sizeof(*ml_params));
1193 
1194 		ml_params->flags = le32_encode_bits(arg->ml.enabled,
1195 						    ATH12K_WMI_FLAG_MLO_ENABLED) |
1196 				   le32_encode_bits(arg->ml.assoc_link,
1197 						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
1198 				   le32_encode_bits(arg->ml.mcast_link,
1199 						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
1200 				   le32_encode_bits(arg->ml.link_add,
1201 						    ATH12K_WMI_FLAG_MLO_LINK_ADD);
1202 
1203 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
1204 			   arg->vdev_id, ml_params->flags);
1205 
1206 		ptr += sizeof(*ml_params);
1207 
1208 		tlv = ptr;
1209 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1210 						 arg->ml.num_partner_links *
1211 						 sizeof(*partner_info));
1212 		ptr += TLV_HDR_SIZE;
1213 
1214 		partner_info = ptr;
1215 
1216 		for (i = 0; i < arg->ml.num_partner_links; i++) {
1217 			partner_info->tlv_header =
1218 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
1219 						       sizeof(*partner_info));
1220 			partner_info->vdev_id =
1221 				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
1222 			partner_info->hw_link_id =
1223 				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
1224 			ether_addr_copy(partner_info->vdev_addr.addr,
1225 					arg->ml.partner_info[i].addr);
1226 
1227 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
1228 				   partner_info->vdev_id, partner_info->hw_link_id,
1229 				   partner_info->vdev_addr.addr);
1230 
1231 			partner_info++;
1232 		}
1233 
1234 		ptr = partner_info;
1235 	}
1236 
1237 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
1238 		   restart ? "restart" : "start", arg->vdev_id,
1239 		   arg->freq, arg->mode);
1240 
1241 	if (restart)
1242 		ret = ath12k_wmi_cmd_send(wmi, skb,
1243 					  WMI_VDEV_RESTART_REQUEST_CMDID);
1244 	else
1245 		ret = ath12k_wmi_cmd_send(wmi, skb,
1246 					  WMI_VDEV_START_REQUEST_CMDID);
1247 	if (ret) {
1248 		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
1249 			    restart ? "restart" : "start");
1250 		dev_kfree_skb(skb);
1251 	}
1252 
1253 	return ret;
1254 }
1255 
1256 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
1257 {
1258 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1259 	struct wmi_vdev_up_cmd *cmd;
1260 	struct sk_buff *skb;
1261 	int ret;
1262 
1263 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1264 	if (!skb)
1265 		return -ENOMEM;
1266 
1267 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1268 
1269 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1270 						 sizeof(*cmd));
1271 	cmd->vdev_id = cpu_to_le32(params->vdev_id);
1272 	cmd->vdev_assoc_id = cpu_to_le32(params->aid);
1273 
1274 	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
1275 
1276 	if (params->tx_bssid) {
1277 		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
1278 		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
1279 		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
1280 	}
1281 
1282 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1283 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1284 		   params->vdev_id, params->aid, params->bssid);
1285 
1286 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1287 	if (ret) {
1288 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1289 		dev_kfree_skb(skb);
1290 	}
1291 
1292 	return ret;
1293 }
1294 
1295 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
1296 				    struct ath12k_wmi_peer_create_arg *arg)
1297 {
1298 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1299 	struct wmi_peer_create_cmd *cmd;
1300 	struct sk_buff *skb;
1301 	int ret, len;
1302 	struct wmi_peer_create_mlo_params *ml_param;
1303 	void *ptr;
1304 	struct wmi_tlv *tlv;
1305 
1306 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
1307 
1308 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1309 	if (!skb)
1310 		return -ENOMEM;
1311 
1312 	cmd = (struct wmi_peer_create_cmd *)skb->data;
1313 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
1314 						 sizeof(*cmd));
1315 
1316 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
1317 	cmd->peer_type = cpu_to_le32(arg->peer_type);
1318 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1319 
1320 	ptr = skb->data + sizeof(*cmd);
1321 	tlv = ptr;
1322 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1323 					 sizeof(*ml_param));
1324 	ptr += TLV_HDR_SIZE;
1325 	ml_param = ptr;
1326 	ml_param->tlv_header =
1327 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
1328 					       sizeof(*ml_param));
1329 	if (arg->ml_enabled)
1330 		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
1331 
1332 	ptr += sizeof(*ml_param);
1333 
1334 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1335 		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
1336 		   arg->vdev_id, arg->peer_addr, ml_param->flags);
1337 
1338 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1339 	if (ret) {
1340 		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1341 		dev_kfree_skb(skb);
1342 	}
1343 
1344 	return ret;
1345 }
1346 
1347 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
1348 				    const u8 *peer_addr, u8 vdev_id)
1349 {
1350 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1351 	struct wmi_peer_delete_cmd *cmd;
1352 	struct sk_buff *skb;
1353 	int ret;
1354 
1355 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1356 	if (!skb)
1357 		return -ENOMEM;
1358 
1359 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
1360 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
1361 						 sizeof(*cmd));
1362 
1363 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1364 	cmd->vdev_id = cpu_to_le32(vdev_id);
1365 
1366 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1367 		   "WMI peer delete vdev_id %d peer_addr %pM\n",
1368 		   vdev_id,  peer_addr);
1369 
1370 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
1371 	if (ret) {
1372 		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
1373 		dev_kfree_skb(skb);
1374 	}
1375 
1376 	return ret;
1377 }
1378 
1379 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1380 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1381 {
1382 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1383 	struct wmi_pdev_set_regdomain_cmd *cmd;
1384 	struct sk_buff *skb;
1385 	int ret;
1386 
1387 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1388 	if (!skb)
1389 		return -ENOMEM;
1390 
1391 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1392 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1393 						 sizeof(*cmd));
1394 
1395 	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1396 	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1397 	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1398 	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1399 	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1400 	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1401 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1402 
1403 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1404 		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1405 		   arg->current_rd_in_use, arg->current_rd_2g,
1406 		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1407 
1408 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1409 	if (ret) {
1410 		ath12k_warn(ar->ab,
1411 			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1412 		dev_kfree_skb(skb);
1413 	}
1414 
1415 	return ret;
1416 }
1417 
1418 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
1419 			      u32 vdev_id, u32 param_id, u32 param_val)
1420 {
1421 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1422 	struct wmi_peer_set_param_cmd *cmd;
1423 	struct sk_buff *skb;
1424 	int ret;
1425 
1426 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1427 	if (!skb)
1428 		return -ENOMEM;
1429 
1430 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1431 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
1432 						 sizeof(*cmd));
1433 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1434 	cmd->vdev_id = cpu_to_le32(vdev_id);
1435 	cmd->param_id = cpu_to_le32(param_id);
1436 	cmd->param_value = cpu_to_le32(param_val);
1437 
1438 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1439 		   "WMI vdev %d peer %pM set param %d value %d\n",
1440 		   vdev_id, peer_addr, param_id, param_val);
1441 
1442 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
1443 	if (ret) {
1444 		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
1445 		dev_kfree_skb(skb);
1446 	}
1447 
1448 	return ret;
1449 }
1450 
1451 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
1452 					u8 peer_addr[ETH_ALEN],
1453 					u32 peer_tid_bitmap,
1454 					u8 vdev_id)
1455 {
1456 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1457 	struct wmi_peer_flush_tids_cmd *cmd;
1458 	struct sk_buff *skb;
1459 	int ret;
1460 
1461 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1462 	if (!skb)
1463 		return -ENOMEM;
1464 
1465 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1466 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
1467 						 sizeof(*cmd));
1468 
1469 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1470 	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
1471 	cmd->vdev_id = cpu_to_le32(vdev_id);
1472 
1473 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1474 		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1475 		   vdev_id, peer_addr, peer_tid_bitmap);
1476 
1477 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1478 	if (ret) {
1479 		ath12k_warn(ar->ab,
1480 			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1481 		dev_kfree_skb(skb);
1482 	}
1483 
1484 	return ret;
1485 }
1486 
1487 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
1488 					   int vdev_id, const u8 *addr,
1489 					   dma_addr_t paddr, u8 tid,
1490 					   u8 ba_window_size_valid,
1491 					   u32 ba_window_size)
1492 {
1493 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
1494 	struct sk_buff *skb;
1495 	int ret;
1496 
1497 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
1498 	if (!skb)
1499 		return -ENOMEM;
1500 
1501 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
1502 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
1503 						 sizeof(*cmd));
1504 
1505 	ether_addr_copy(cmd->peer_macaddr.addr, addr);
1506 	cmd->vdev_id = cpu_to_le32(vdev_id);
1507 	cmd->tid = cpu_to_le32(tid);
1508 	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
1509 	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
1510 	cmd->queue_no = cpu_to_le32(tid);
1511 	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
1512 	cmd->ba_window_size = cpu_to_le32(ba_window_size);
1513 
1514 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1515 		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1516 		   addr, vdev_id, tid);
1517 
1518 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
1519 				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
1520 	if (ret) {
1521 		ath12k_warn(ar->ab,
1522 			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1523 		dev_kfree_skb(skb);
1524 	}
1525 
1526 	return ret;
1527 }
1528 
1529 int
1530 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1531 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1532 {
1533 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1534 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1535 	struct sk_buff *skb;
1536 	int ret;
1537 
1538 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1539 	if (!skb)
1540 		return -ENOMEM;
1541 
1542 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1543 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1544 						 sizeof(*cmd));
1545 
1546 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1547 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1548 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1549 
1550 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1551 		   "WMI peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map 0x%x\n",
1552 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1553 
1554 	ret = ath12k_wmi_cmd_send(wmi, skb,
1555 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1556 	if (ret) {
1557 		ath12k_warn(ar->ab,
1558 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1559 		dev_kfree_skb(skb);
1560 	}
1561 
1562 	return ret;
1563 }
1564 
1565 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1566 			      u32 param_value, u8 pdev_id)
1567 {
1568 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1569 	struct wmi_pdev_set_param_cmd *cmd;
1570 	struct sk_buff *skb;
1571 	int ret;
1572 
1573 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1574 	if (!skb)
1575 		return -ENOMEM;
1576 
1577 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1578 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1579 						 sizeof(*cmd));
1580 	cmd->pdev_id = cpu_to_le32(pdev_id);
1581 	cmd->param_id = cpu_to_le32(param_id);
1582 	cmd->param_value = cpu_to_le32(param_value);
1583 
1584 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1585 		   "WMI pdev set param %d pdev id %d value %d\n",
1586 		   param_id, pdev_id, param_value);
1587 
1588 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1589 	if (ret) {
1590 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1591 		dev_kfree_skb(skb);
1592 	}
1593 
1594 	return ret;
1595 }
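
/* Example usage (a sketch, not taken from this driver): enabling a
 * pdev-level feature. WMI_PDEV_PARAM_PMF_QOS is assumed from the pdev
 * parameter ids in wmi.h; real callers pick the id and value they need.
 *
 *	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
 *					1, ar->pdev->pdev_id);
 *	if (ret)
 *		ath12k_warn(ar->ab, "failed to set pdev param: %d\n", ret);
 */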
1596 
1597 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1598 {
1599 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1600 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1601 	struct sk_buff *skb;
1602 	int ret;
1603 
1604 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1605 	if (!skb)
1606 		return -ENOMEM;
1607 
1608 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1609 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1610 						 sizeof(*cmd));
1611 	cmd->vdev_id = cpu_to_le32(vdev_id);
1612 	cmd->sta_ps_mode = cpu_to_le32(enable);
1613 
1614 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1615 		   "WMI vdev set psmode %d vdev id %d\n",
1616 		   enable, vdev_id);
1617 
1618 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1619 	if (ret) {
1620 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1621 		dev_kfree_skb(skb);
1622 	}
1623 
1624 	return ret;
1625 }
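
/* Example usage (sketch): toggling station power save for a vdev. The
 * enum values WMI_STA_PS_MODE_ENABLED/WMI_STA_PS_MODE_DISABLED are
 * assumed from wmi.h; the ps flag is a hypothetical caller state.
 *
 *	enum wmi_sta_ps_mode mode = ps ? WMI_STA_PS_MODE_ENABLED :
 *					 WMI_STA_PS_MODE_DISABLED;
 *
 *	ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, mode);
 */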
1626 
1627 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1628 			    u32 pdev_id)
1629 {
1630 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1631 	struct wmi_pdev_suspend_cmd *cmd;
1632 	struct sk_buff *skb;
1633 	int ret;
1634 
1635 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1636 	if (!skb)
1637 		return -ENOMEM;
1638 
1639 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1640 
1641 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1642 						 sizeof(*cmd));
1643 
1644 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1645 	cmd->pdev_id = cpu_to_le32(pdev_id);
1646 
1647 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1648 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1649 
1650 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1651 	if (ret) {
1652 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1653 		dev_kfree_skb(skb);
1654 	}
1655 
1656 	return ret;
1657 }
1658 
1659 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1660 {
1661 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1662 	struct wmi_pdev_resume_cmd *cmd;
1663 	struct sk_buff *skb;
1664 	int ret;
1665 
1666 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1667 	if (!skb)
1668 		return -ENOMEM;
1669 
1670 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1671 
1672 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1673 						 sizeof(*cmd));
1674 	cmd->pdev_id = cpu_to_le32(pdev_id);
1675 
1676 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1677 		   "WMI pdev resume pdev id %d\n", pdev_id);
1678 
1679 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1680 	if (ret) {
1681 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1682 		dev_kfree_skb(skb);
1683 	}
1684 
1685 	return ret;
1686 }
1687 
1688 /* TODO: firmware support for this command is not available yet.
1689  * It can be tested once the command and the corresponding
1690  * event are implemented in firmware.
1691  */
1692 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1693 					  enum wmi_bss_chan_info_req_type type)
1694 {
1695 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1696 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1697 	struct sk_buff *skb;
1698 	int ret;
1699 
1700 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1701 	if (!skb)
1702 		return -ENOMEM;
1703 
1704 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1705 
1706 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1707 						 sizeof(*cmd));
1708 	cmd->req_type = cpu_to_le32(type);
1709 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1710 
1711 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1712 		   "WMI bss chan info req type %d\n", type);
1713 
1714 	ret = ath12k_wmi_cmd_send(wmi, skb,
1715 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1716 	if (ret) {
1717 		ath12k_warn(ar->ab,
1718 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1719 		dev_kfree_skb(skb);
1720 	}
1721 
1722 	return ret;
1723 }
1724 
1725 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1726 					struct ath12k_wmi_ap_ps_arg *arg)
1727 {
1728 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1729 	struct wmi_ap_ps_peer_cmd *cmd;
1730 	struct sk_buff *skb;
1731 	int ret;
1732 
1733 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1734 	if (!skb)
1735 		return -ENOMEM;
1736 
1737 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1738 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1739 						 sizeof(*cmd));
1740 
1741 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1742 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1743 	cmd->param = cpu_to_le32(arg->param);
1744 	cmd->value = cpu_to_le32(arg->value);
1745 
1746 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1747 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1748 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1749 
1750 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1751 	if (ret) {
1752 		ath12k_warn(ar->ab,
1753 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1754 		dev_kfree_skb(skb);
1755 	}
1756 
1757 	return ret;
1758 }
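
/* Example usage (sketch): updating a peer's UAPSD setting on an AP
 * vdev. WMI_AP_PS_PEER_PARAM_UAPSD is assumed from wmi.h and
 * uapsd_queues is a hypothetical caller-provided bitmap.
 *
 *	struct ath12k_wmi_ap_ps_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.param = WMI_AP_PS_PEER_PARAM_UAPSD,
 *		.value = uapsd_queues,
 *	};
 *
 *	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
 */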
1759 
1760 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1761 				u32 param, u32 param_value)
1762 {
1763 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1764 	struct wmi_sta_powersave_param_cmd *cmd;
1765 	struct sk_buff *skb;
1766 	int ret;
1767 
1768 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1769 	if (!skb)
1770 		return -ENOMEM;
1771 
1772 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1773 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1774 						 sizeof(*cmd));
1775 
1776 	cmd->vdev_id = cpu_to_le32(vdev_id);
1777 	cmd->param = cpu_to_le32(param);
1778 	cmd->value = cpu_to_le32(param_value);
1779 
1780 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1781 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1782 		   vdev_id, param, param_value);
1783 
1784 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1785 	if (ret) {
1786 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1787 		dev_kfree_skb(skb);
1788 	}
1789 
1790 	return ret;
1791 }
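
/* Example usage (sketch): setting the station power save RX wake
 * policy. WMI_STA_PS_PARAM_RX_WAKE_POLICY and
 * WMI_STA_PS_RX_WAKE_POLICY_WAKE are assumptions based on the power
 * save definitions in wmi.h.
 *
 *	ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
 *					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
 *					  WMI_STA_PS_RX_WAKE_POLICY_WAKE);
 */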
1792 
1793 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1794 {
1795 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1796 	struct wmi_force_fw_hang_cmd *cmd;
1797 	struct sk_buff *skb;
1798 	int ret, len;
1799 
1800 	len = sizeof(*cmd);
1801 
1802 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1803 	if (!skb)
1804 		return -ENOMEM;
1805 
1806 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1807 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1808 						 len);
1809 
1810 	cmd->type = cpu_to_le32(type);
1811 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1812 
1813 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1814 
1815 	if (ret) {
1816 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1817 		dev_kfree_skb(skb);
1818 	}
1819 	return ret;
1820 }
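
/* Example usage (sketch): a debugfs "simulate_fw_crash" style call.
 * ATH12K_WMI_FW_HANG_ASSERT_TYPE and ATH12K_WMI_FW_HANG_DELAY are
 * assumed to be the constants used for firmware assert injection.
 *
 *	ret = ath12k_wmi_force_fw_hang_cmd(ar,
 *					   ATH12K_WMI_FW_HANG_ASSERT_TYPE,
 *					   ATH12K_WMI_FW_HANG_DELAY);
 */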
1821 
1822 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1823 				  u32 param_id, u32 param_value)
1824 {
1825 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1826 	struct wmi_vdev_set_param_cmd *cmd;
1827 	struct sk_buff *skb;
1828 	int ret;
1829 
1830 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1831 	if (!skb)
1832 		return -ENOMEM;
1833 
1834 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1835 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1836 						 sizeof(*cmd));
1837 
1838 	cmd->vdev_id = cpu_to_le32(vdev_id);
1839 	cmd->param_id = cpu_to_le32(param_id);
1840 	cmd->param_value = cpu_to_le32(param_value);
1841 
1842 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1843 		   "WMI vdev id 0x%x set param %d value %d\n",
1844 		   vdev_id, param_id, param_value);
1845 
1846 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1847 	if (ret) {
1848 		ath12k_warn(ar->ab,
1849 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1850 		dev_kfree_skb(skb);
1851 	}
1852 
1853 	return ret;
1854 }
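
/* Example usage (sketch): programming a vdev parameter, here the RTS
 * threshold. WMI_VDEV_PARAM_RTS_THRESHOLD is assumed from the vdev
 * parameter ids in wmi.h.
 *
 *	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 *					    WMI_VDEV_PARAM_RTS_THRESHOLD,
 *					    hw->wiphy->rts_threshold);
 */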
1855 
1856 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1857 {
1858 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1859 	struct wmi_get_pdev_temperature_cmd *cmd;
1860 	struct sk_buff *skb;
1861 	int ret;
1862 
1863 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1864 	if (!skb)
1865 		return -ENOMEM;
1866 
1867 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1868 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1869 						 sizeof(*cmd));
1870 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1871 
1872 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1873 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1874 
1875 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1876 	if (ret) {
1877 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1878 		dev_kfree_skb(skb);
1879 	}
1880 
1881 	return ret;
1882 }
1883 
1884 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1885 					    u32 vdev_id, u32 bcn_ctrl_op)
1886 {
1887 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1888 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1889 	struct sk_buff *skb;
1890 	int ret;
1891 
1892 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1893 	if (!skb)
1894 		return -ENOMEM;
1895 
1896 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1897 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1898 						 sizeof(*cmd));
1899 
1900 	cmd->vdev_id = cpu_to_le32(vdev_id);
1901 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1902 
1903 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1904 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1905 		   vdev_id, bcn_ctrl_op);
1906 
1907 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1908 	if (ret) {
1909 		ath12k_warn(ar->ab,
1910 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1911 		dev_kfree_skb(skb);
1912 	}
1913 
1914 	return ret;
1915 }
1916 
1917 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1918 			     const u8 *p2p_ie)
1919 {
1920 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1921 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1922 	size_t p2p_ie_len, aligned_len;
1923 	struct wmi_tlv *tlv;
1924 	struct sk_buff *skb;
1925 	void *ptr;
1926 	int ret, len;
1927 
1928 	p2p_ie_len = p2p_ie[1] + 2;	/* IE length byte plus 2-byte header */
1929 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1930 
1931 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1932 
1933 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1934 	if (!skb)
1935 		return -ENOMEM;
1936 
1937 	ptr = skb->data;
1938 	cmd = ptr;
1939 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1940 						 sizeof(*cmd));
1941 	cmd->vdev_id = cpu_to_le32(vdev_id);
1942 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1943 
1944 	ptr += sizeof(*cmd);
1945 	tlv = ptr;
1946 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
1947 					 aligned_len);
1948 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1949 
1950 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1951 	if (ret) {
1952 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1953 		dev_kfree_skb(skb);
1954 	}
1955 
1956 	return ret;
1957 }
1958 
1959 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1960 			struct ieee80211_mutable_offsets *offs,
1961 			struct sk_buff *bcn,
1962 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1963 {
1964 	struct ath12k *ar = arvif->ar;
1965 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1966 	struct ath12k_base *ab = ar->ab;
1967 	struct wmi_bcn_tmpl_cmd *cmd;
1968 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1969 	struct ath12k_vif *ahvif = arvif->ahvif;
1970 	struct ieee80211_bss_conf *conf;
1971 	u32 vdev_id = arvif->vdev_id;
1972 	struct wmi_tlv *tlv;
1973 	struct sk_buff *skb;
1974 	u32 ema_params = 0;
1975 	void *ptr;
1976 	int ret, len;
1977 	size_t aligned_len = roundup(bcn->len, 4);
1978 
1979 	conf = ath12k_mac_get_link_bss_conf(arvif);
1980 	if (!conf) {
1981 		ath12k_warn(ab,
1982 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1983 			    ahvif->vif->addr, arvif->link_id);
1984 		return -EINVAL;
1985 	}
1986 
1987 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1988 
1989 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1990 	if (!skb)
1991 		return -ENOMEM;
1992 
1993 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1994 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1995 						 sizeof(*cmd));
1996 	cmd->vdev_id = cpu_to_le32(vdev_id);
1997 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1998 
1999 	if (conf->csa_active) {
2000 		cmd->csa_switch_count_offset =
2001 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
2002 		cmd->ext_csa_switch_count_offset =
2003 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
2004 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
2005 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
2006 	}
2007 
2008 	cmd->buf_len = cpu_to_le32(bcn->len);
2009 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
2010 	if (ema_args) {
2011 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
2012 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
2013 		if (ema_args->bcn_index == 0)
2014 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
2015 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
2016 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
2017 		cmd->ema_params = cpu_to_le32(ema_params);
2018 	}
2019 	cmd->feature_enable_bitmap =
2020 		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
2021 					    WMI_BEACON_PROTECTION_EN_BIT));
2022 
2023 	ptr = skb->data + sizeof(*cmd);
2024 
2025 	bcn_prb_info = ptr;
2026 	len = sizeof(*bcn_prb_info);
2027 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
2028 							  len);
2029 	bcn_prb_info->caps = 0;
2030 	bcn_prb_info->erp = 0;
2031 
2032 	ptr += sizeof(*bcn_prb_info);
2033 
2034 	tlv = ptr;
2035 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2036 	memcpy(tlv->value, bcn->data, bcn->len);
2037 
2038 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2039 	if (ret) {
2040 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2041 		dev_kfree_skb(skb);
2042 	}
2043 
2044 	return ret;
2045 }
2046 
2047 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2048 				struct wmi_vdev_install_key_arg *arg)
2049 {
2050 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2051 	struct wmi_vdev_install_key_cmd *cmd;
2052 	struct wmi_tlv *tlv;
2053 	struct sk_buff *skb;
2054 	int ret, len, key_len_aligned;
2055 
2056 	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
2057 	 * length is specified in cmd->key_len.
2058 	 */
2059 	key_len_aligned = roundup(arg->key_len, 4);
2060 
2061 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2062 
2063 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2064 	if (!skb)
2065 		return -ENOMEM;
2066 
2067 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2068 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2069 						 sizeof(*cmd));
2070 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2071 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2072 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2073 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2074 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2075 	cmd->key_len = cpu_to_le32(arg->key_len);
2076 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2077 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2078 
2079 	if (arg->key_rsc_counter)
2080 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2081 
2082 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2083 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2084 	memcpy(tlv->value, arg->key_data, arg->key_len);
2085 
2086 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2087 		   "WMI vdev install key idx %d cipher %d len %d\n",
2088 		   arg->key_idx, arg->key_cipher, arg->key_len);
2089 
2090 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2091 	if (ret) {
2092 		ath12k_warn(ar->ab,
2093 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2094 		dev_kfree_skb(skb);
2095 	}
2096 
2097 	return ret;
2098 }
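
/* Example usage (sketch): installing a pairwise CCMP key taken from a
 * mac80211 key. WMI_CIPHER_AES_CCM and WMI_KEY_PAIRWISE are assumed
 * from wmi.h; key is a hypothetical struct ieee80211_key_conf pointer.
 *
 *	struct wmi_vdev_install_key_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.macaddr = peer_addr,
 *		.key_idx = key->keyidx,
 *		.key_len = key->keylen,
 *		.key_data = key->key,
 *		.key_cipher = WMI_CIPHER_AES_CCM,
 *		.key_flags = WMI_KEY_PAIRWISE,
 *	};
 *
 *	ret = ath12k_wmi_vdev_install_key(ar, &arg);
 */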
2099 
2100 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2101 				       struct ath12k_wmi_peer_assoc_arg *arg,
2102 				       bool hw_crypto_disabled)
2103 {
2104 	cmd->peer_flags = 0;
2105 	cmd->peer_flags_ext = 0;
2106 
2107 	if (arg->is_wme_set) {
2108 		if (arg->qos_flag)
2109 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2110 		if (arg->apsd_flag)
2111 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2112 		if (arg->ht_flag)
2113 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2114 		if (arg->bw_40)
2115 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2116 		if (arg->bw_80)
2117 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2118 		if (arg->bw_160)
2119 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2120 		if (arg->bw_320)
2121 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2122 
2123 		/* Typically, if STBC is enabled for VHT, it should be
2124 		 * enabled for HT as well.
2125 		 */
2126 		if (arg->stbc_flag)
2127 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2128 
2129 		/* Typically, if LDPC is enabled for VHT, it should be
2130 		 * enabled for HT as well.
2131 		 */
2132 		if (arg->ldpc_flag)
2133 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2134 
2135 		if (arg->static_mimops_flag)
2136 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2137 		if (arg->dynamic_mimops_flag)
2138 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2139 		if (arg->spatial_mux_flag)
2140 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2141 		if (arg->vht_flag)
2142 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2143 		if (arg->he_flag)
2144 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2145 		if (arg->twt_requester)
2146 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2147 		if (arg->twt_responder)
2148 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2149 		if (arg->eht_flag)
2150 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2151 	}
2152 
2153 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2154 	 * (during re-association).
2155 	 * Authorization will be done for these modes on key installation.
2156 	 */
2157 	if (arg->auth_flag)
2158 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2159 	if (arg->need_ptk_4_way) {
2160 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2161 		if (!hw_crypto_disabled && arg->is_assoc)
2162 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2163 	}
2164 	if (arg->need_gtk_2_way)
2165 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2166 	/* safe mode bypass the 4-way handshake */
2167 	if (arg->safe_mode_enabled)
2168 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2169 						 WMI_PEER_NEED_GTK_2_WAY));
2170 
2171 	if (arg->is_pmf_enabled)
2172 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2173 
2174 	/* Disable AMSDU for station transmit, if the user configures it.
2175 	 * Disable AMSDU for AP transmit to 11n stations, if the user
2176 	 * configures it.
2177 	 * if (arg->amsdu_disable) - add once firmware support is available.
2178 	 */
2179 
2180 	/* The target asserts if a node is marked HT while all MCS rates are
2181 	 * set to 0. Mark the node as non-HT if all the MCS rates are
2182 	 * disabled through iwpriv.
2183 	 */
2184 	if (arg->peer_ht_rates.num_rates == 0)
2185 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2186 }
2187 
2188 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2189 				   struct ath12k_wmi_peer_assoc_arg *arg)
2190 {
2191 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2192 	struct wmi_peer_assoc_complete_cmd *cmd;
2193 	struct ath12k_wmi_vht_rate_set_params *mcs;
2194 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2195 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2196 	struct wmi_peer_assoc_mlo_params *ml_params;
2197 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2198 	struct sk_buff *skb;
2199 	struct wmi_tlv *tlv;
2200 	void *ptr;
2201 	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
2202 	u32 peer_ht_rates_align, eml_trans_timeout;
2203 	int i, ret, len;
2204 	u16 eml_cap;
2205 	__le32 v;
2206 
2207 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2208 					  sizeof(u32));
2209 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2210 				      sizeof(u32));
2211 
2212 	len = sizeof(*cmd) +
2213 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2214 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2215 	      sizeof(*mcs) + TLV_HDR_SIZE +
2216 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2217 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2218 
2219 	if (arg->ml.enabled)
2220 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2221 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2222 	else
2223 		len += (2 * TLV_HDR_SIZE);
2224 
2225 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2226 	if (!skb)
2227 		return -ENOMEM;
2228 
2229 	ptr = skb->data;
2230 
2231 	cmd = ptr;
2232 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2233 						 sizeof(*cmd));
2234 
2235 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2236 
2237 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2238 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2239 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2240 
2241 	ath12k_wmi_copy_peer_flags(cmd, arg,
2242 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2243 					    &ar->ab->dev_flags));
2244 
2245 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2246 
2247 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2248 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2249 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2250 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2251 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2252 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2253 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2254 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2255 
2256 	/* Update 11ax capabilities */
2257 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2258 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2259 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2260 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2261 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2262 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2263 		cmd->peer_he_cap_phy[i] =
2264 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2265 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2266 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2267 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2268 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2269 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2270 
2271 	/* Update 11be capabilities */
2272 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2273 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2274 		       0);
2275 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2276 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2277 		       0);
2278 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2279 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2280 
2281 	/* Update peer legacy rate information */
2282 	ptr += sizeof(*cmd);
2283 
2284 	tlv = ptr;
2285 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2286 
2287 	ptr += TLV_HDR_SIZE;
2288 
2289 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2290 	memcpy(ptr, arg->peer_legacy_rates.rates,
2291 	       arg->peer_legacy_rates.num_rates);
2292 
2293 	/* Update peer HT rate information */
2294 	ptr += peer_legacy_rates_align;
2295 
2296 	tlv = ptr;
2297 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2298 	ptr += TLV_HDR_SIZE;
2299 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2300 	memcpy(ptr, arg->peer_ht_rates.rates,
2301 	       arg->peer_ht_rates.num_rates);
2302 
2303 	/* VHT Rates */
2304 	ptr += peer_ht_rates_align;
2305 
2306 	mcs = ptr;
2307 
2308 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2309 						 sizeof(*mcs));
2310 
2311 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2312 
2313 	/* Update bandwidth-NSS mapping */
2314 	cmd->peer_bw_rxnss_override = 0;
2315 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2316 
2317 	if (arg->vht_capable) {
2318 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2319 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2320 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2321 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2322 	}
2323 
2324 	/* HE Rates */
2325 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2326 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2327 
2328 	ptr += sizeof(*mcs);
2329 
2330 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2331 
2332 	tlv = ptr;
2333 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2334 	ptr += TLV_HDR_SIZE;
2335 
2336 	/* Loop through the HE rate set */
2337 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2338 		he_mcs = ptr;
2339 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2340 							    sizeof(*he_mcs));
2341 
2342 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2343 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2344 		ptr += sizeof(*he_mcs);
2345 	}
2346 
2347 	tlv = ptr;
2348 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2349 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2350 	ptr += TLV_HDR_SIZE;
2351 	if (!len)
2352 		goto skip_ml_params;
2353 
2354 	ml_params = ptr;
2355 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2356 						       len);
2357 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2358 
2359 	if (arg->ml.assoc_link)
2360 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2361 
2362 	if (arg->ml.primary_umac)
2363 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2364 
2365 	if (arg->ml.logical_link_idx_valid)
2366 		ml_params->flags |=
2367 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2368 
2369 	if (arg->ml.peer_id_valid)
2370 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2371 
2372 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2373 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2374 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2375 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2376 
2377 	eml_cap = arg->ml.eml_cap;
2378 	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
2379 		/* Padding delay */
2380 		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
2381 		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
2382 		/* Transition delay */
2383 		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
2384 		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
2385 		/* Transition timeout */
2386 		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
2387 		ml_params->emlsr_trans_timeout_us =
2388 					cpu_to_le32(eml_trans_timeout);
2389 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u trans delay %u trans timeout %u\n",
2390 			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
2391 			   eml_trans_timeout);
2392 	}
2393 
2394 	ptr += sizeof(*ml_params);
2395 
2396 skip_ml_params:
2397 	/* Loop through the EHT rate set */
2398 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2399 	tlv = ptr;
2400 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2401 	ptr += TLV_HDR_SIZE;
2402 
2403 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2404 		eht_mcs = ptr;
2405 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2406 							     sizeof(*eht_mcs));
2407 
2408 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2409 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2410 		ptr += sizeof(*eht_mcs);
2411 	}
2412 
2413 	/* Update MCS15 capability */
2414 	if (arg->eht_disable_mcs15)
2415 		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
2416 
2417 	tlv = ptr;
2418 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2419 	/* fill ML Partner links */
2420 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2421 	ptr += TLV_HDR_SIZE;
2422 
2423 	if (len == 0)
2424 		goto send;
2425 
2426 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2427 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2428 
2429 		partner_info = ptr;
2430 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2431 								  sizeof(*partner_info));
2432 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2433 		partner_info->hw_link_id =
2434 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2435 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2436 
2437 		if (arg->ml.partner_info[i].assoc_link)
2438 			partner_info->flags |=
2439 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2440 
2441 		if (arg->ml.partner_info[i].primary_umac)
2442 			partner_info->flags |=
2443 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2444 
2445 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2446 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2447 			partner_info->flags |= v;
2448 		}
2449 
2450 		partner_info->logical_link_idx =
2451 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2452 		ptr += sizeof(*partner_info);
2453 	}
2454 
2455 send:
2456 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2457 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
2458 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2459 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2460 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2461 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2462 		   cmd->peer_mpdu_density,
2463 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2464 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2465 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2466 		   cmd->peer_he_cap_phy[2],
2467 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2468 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2469 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2470 		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
2471 
2472 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2473 	if (ret) {
2474 		ath12k_warn(ar->ab,
2475 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2476 		dev_kfree_skb(skb);
2477 	}
2478 
2479 	return ret;
2480 }
2481 
2482 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2483 				struct ath12k_wmi_scan_req_arg *arg)
2484 {
2485 	/* setup commonly used values */
2486 	arg->scan_req_id = 1;
2487 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2488 	arg->dwell_time_active = 50;
2489 	arg->dwell_time_active_2g = 0;
2490 	arg->dwell_time_passive = 150;
2491 	arg->dwell_time_active_6g = 70;
2492 	arg->dwell_time_passive_6g = 70;
2493 	arg->min_rest_time = 50;
2494 	arg->max_rest_time = 500;
2495 	arg->repeat_probe_time = 0;
2496 	arg->probe_spacing_time = 0;
2497 	arg->idle_time = 0;
2498 	arg->max_scan_time = 20000;
2499 	arg->probe_delay = 5;
2500 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2501 				  WMI_SCAN_EVENT_COMPLETED |
2502 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2503 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2504 				  WMI_SCAN_EVENT_DEQUEUED;
2505 	arg->scan_f_chan_stat_evnt = 1;
2506 	arg->num_bssid = 1;
2507 
2508 	/* Fill bssid_list[0] with 0xff; otherwise the BSSID and RA will be
2509 	 * all zeros in the probe request.
2510 	 */
2511 	eth_broadcast_addr(arg->bssid_list[0].addr);
2512 }
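
/* Example usage (sketch): the expected flow is to take the defaults
 * from ath12k_wmi_start_scan_init() and then override the fields the
 * caller cares about before sending. ATH12K_SCAN_ID and chan_list here
 * are assumptions standing in for the real scan state.
 *
 *	struct ath12k_wmi_scan_req_arg arg = {};
 *
 *	ath12k_wmi_start_scan_init(ar, &arg);
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.scan_id = ATH12K_SCAN_ID;
 *	arg.num_chan = n_chans;
 *	arg.chan_list = chan_list;
 *	ret = ath12k_wmi_send_scan_start_cmd(ar, &arg);
 */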
2513 
2514 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2515 						   struct ath12k_wmi_scan_req_arg *arg)
2516 {
2517 	/* Scan events subscription */
2518 	if (arg->scan_ev_started)
2519 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2520 	if (arg->scan_ev_completed)
2521 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2522 	if (arg->scan_ev_bss_chan)
2523 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2524 	if (arg->scan_ev_foreign_chan)
2525 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2526 	if (arg->scan_ev_dequeued)
2527 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2528 	if (arg->scan_ev_preempted)
2529 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2530 	if (arg->scan_ev_start_failed)
2531 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2532 	if (arg->scan_ev_restarted)
2533 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2534 	if (arg->scan_ev_foreign_chn_exit)
2535 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2536 	if (arg->scan_ev_suspended)
2537 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2538 	if (arg->scan_ev_resumed)
2539 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2540 
2541 	/* Set scan control flags */
2542 	cmd->scan_ctrl_flags = 0;
2543 	if (arg->scan_f_passive)
2544 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2545 	if (arg->scan_f_strict_passive_pch)
2546 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2547 	if (arg->scan_f_promisc_mode)
2548 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2549 	if (arg->scan_f_capture_phy_err)
2550 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2551 	if (arg->scan_f_half_rate)
2552 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2553 	if (arg->scan_f_quarter_rate)
2554 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2555 	if (arg->scan_f_cck_rates)
2556 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2557 	if (arg->scan_f_ofdm_rates)
2558 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2559 	if (arg->scan_f_chan_stat_evnt)
2560 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2561 	if (arg->scan_f_filter_prb_req)
2562 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2563 	if (arg->scan_f_bcast_probe)
2564 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2565 	if (arg->scan_f_offchan_mgmt_tx)
2566 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2567 	if (arg->scan_f_offchan_data_tx)
2568 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2569 	if (arg->scan_f_force_active_dfs_chn)
2570 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2571 	if (arg->scan_f_add_tpc_ie_in_probe)
2572 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2573 	if (arg->scan_f_add_ds_ie_in_probe)
2574 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2575 	if (arg->scan_f_add_spoofed_mac_in_probe)
2576 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2577 	if (arg->scan_f_add_rand_seq_in_probe)
2578 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2579 	if (arg->scan_f_en_ie_whitelist_in_probe)
2580 		cmd->scan_ctrl_flags |=
2581 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2582 
2583 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2584 						 WMI_SCAN_DWELL_MODE_MASK);
2585 }
2586 
2587 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2588 				   struct ath12k_wmi_scan_req_arg *arg)
2589 {
2590 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2591 	struct wmi_start_scan_cmd *cmd;
2592 	struct ath12k_wmi_ssid_params *ssid = NULL;
2593 	struct ath12k_wmi_mac_addr_params *bssid;
2594 	struct sk_buff *skb;
2595 	struct wmi_tlv *tlv;
2596 	void *ptr;
2597 	int i, ret, len;
2598 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2599 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2600 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2601 
2602 	len = sizeof(*cmd);
2603 
2604 	len += TLV_HDR_SIZE;
2605 	if (arg->num_chan)
2606 		len += arg->num_chan * sizeof(u32);
2607 
2608 	len += TLV_HDR_SIZE;
2609 	if (arg->num_ssids)
2610 		len += arg->num_ssids * sizeof(*ssid);
2611 
2612 	len += TLV_HDR_SIZE;
2613 	if (arg->num_bssid)
2614 		len += sizeof(*bssid) * arg->num_bssid;
2615 
2616 	if (arg->num_hint_bssid)
2617 		len += TLV_HDR_SIZE +
2618 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2619 
2620 	if (arg->num_hint_s_ssid)
2621 		len += TLV_HDR_SIZE +
2622 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2623 
2624 	len += TLV_HDR_SIZE;
2625 	if (arg->extraie.len)
2626 		extraie_len_with_pad =
2627 			roundup(arg->extraie.len, sizeof(u32));
2628 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2629 		len += extraie_len_with_pad;
2630 	} else {
2631 		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2632 			    arg->extraie.len);
2633 		extraie_len_with_pad = 0;
2634 	}
2635 
2636 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2637 	if (!skb)
2638 		return -ENOMEM;
2639 
2640 	ptr = skb->data;
2641 
2642 	cmd = ptr;
2643 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2644 						 sizeof(*cmd));
2645 
2646 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2647 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2648 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2649 	if (ar->state_11d == ATH12K_11D_PREPARING)
2650 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2651 	else
2652 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2653 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2654 
2655 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2656 
2657 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2658 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2659 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2660 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2661 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2662 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2663 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2664 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2665 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2666 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2667 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2668 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2669 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2670 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2671 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2672 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2673 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2674 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2675 
2676 	ptr += sizeof(*cmd);
2677 
2678 	len = arg->num_chan * sizeof(u32);
2679 
2680 	tlv = ptr;
2681 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2682 	ptr += TLV_HDR_SIZE;
2683 	tmp_ptr = (u32 *)ptr;
2684 
2685 	memcpy(tmp_ptr, arg->chan_list, len);
2686 
2687 	ptr += len;
2688 
2689 	len = arg->num_ssids * sizeof(*ssid);
2690 	tlv = ptr;
2691 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2692 
2693 	ptr += TLV_HDR_SIZE;
2694 
2695 	if (arg->num_ssids) {
2696 		ssid = ptr;
2697 		for (i = 0; i < arg->num_ssids; ++i) {
2698 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2699 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2700 			       arg->ssid[i].ssid_len);
2701 			ssid++;
2702 		}
2703 	}
2704 
2705 	ptr += (arg->num_ssids * sizeof(*ssid));
2706 	len = arg->num_bssid * sizeof(*bssid);
2707 	tlv = ptr;
2708 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2709 
2710 	ptr += TLV_HDR_SIZE;
2711 	bssid = ptr;
2712 
2713 	if (arg->num_bssid) {
2714 		for (i = 0; i < arg->num_bssid; ++i) {
2715 			ether_addr_copy(bssid->addr,
2716 					arg->bssid_list[i].addr);
2717 			bssid++;
2718 		}
2719 	}
2720 
2721 	ptr += arg->num_bssid * sizeof(*bssid);
2722 
2723 	len = extraie_len_with_pad;
2724 	tlv = ptr;
2725 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2726 	ptr += TLV_HDR_SIZE;
2727 
2728 	if (extraie_len_with_pad)
2729 		memcpy(ptr, arg->extraie.ptr,
2730 		       arg->extraie.len);
2731 
2732 	ptr += extraie_len_with_pad;
2733 
2734 	if (arg->num_hint_s_ssid) {
2735 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2736 		tlv = ptr;
2737 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2738 		ptr += TLV_HDR_SIZE;
2739 		s_ssid = ptr;
2740 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2741 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2742 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2743 			s_ssid++;
2744 		}
2745 		ptr += len;
2746 	}
2747 
2748 	if (arg->num_hint_bssid) {
2749 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2750 		tlv = ptr;
2751 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2752 		ptr += TLV_HDR_SIZE;
2753 		hint_bssid = ptr;
2754 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2755 			hint_bssid->freq_flags =
2756 				arg->hint_bssid[i].freq_flags;
2757 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2758 					&arg->hint_bssid[i].bssid.addr[0]);
2759 			hint_bssid++;
2760 		}
2761 	}
2762 
2763 	ret = ath12k_wmi_cmd_send(wmi, skb,
2764 				  WMI_START_SCAN_CMDID);
2765 	if (ret) {
2766 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2767 		dev_kfree_skb(skb);
2768 	}
2769 
2770 	return ret;
2771 }
2772 
2773 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2774 				  struct ath12k_wmi_scan_cancel_arg *arg)
2775 {
2776 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2777 	struct wmi_stop_scan_cmd *cmd;
2778 	struct sk_buff *skb;
2779 	int ret;
2780 
2781 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2782 	if (!skb)
2783 		return -ENOMEM;
2784 
2785 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2786 
2787 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2788 						 sizeof(*cmd));
2789 
2790 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2791 	cmd->requestor = cpu_to_le32(arg->requester);
2792 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2793 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2794 	/* stop the scan with the corresponding scan_id */
2795 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2796 		/* Cancelling all scans */
2797 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2798 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2799 		/* Cancelling VAP scans */
2800 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2801 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2802 		/* Cancelling specific scan */
2803 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2804 	} else {
2805 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2806 			    arg->req_type);
2807 		dev_kfree_skb(skb);
2808 		return -EINVAL;
2809 	}
2810 
2811 	ret = ath12k_wmi_cmd_send(wmi, skb,
2812 				  WMI_STOP_SCAN_CMDID);
2813 	if (ret) {
2814 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2815 		dev_kfree_skb(skb);
2816 	}
2817 
2818 	return ret;
2819 }
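
/* Example usage (sketch): cancelling the currently running scan by id,
 * as a scan timeout handler might. ATH12K_SCAN_ID is assumed to be the
 * driver's fixed scan identifier.
 *
 *	struct ath12k_wmi_scan_cancel_arg arg = {
 *		.req_type = WLAN_SCAN_CANCEL_SINGLE,
 *		.scan_id = ATH12K_SCAN_ID,
 *		.vdev_id = arvif->vdev_id,
 *		.pdev_id = ar->pdev->pdev_id,
 *	};
 *
 *	ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
 */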
2820 
2821 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2822 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2823 {
2824 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2825 	struct wmi_scan_chan_list_cmd *cmd;
2826 	struct sk_buff *skb;
2827 	struct ath12k_wmi_channel_params *chan_info;
2828 	struct ath12k_wmi_channel_arg *channel_arg;
2829 	struct wmi_tlv *tlv;
2830 	void *ptr;
2831 	int i, ret, len;
2832 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2833 	__le32 *reg1, *reg2;
2834 
2835 	channel_arg = &arg->channel[0];
2836 	while (arg->nallchans) {
2837 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2838 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2839 			sizeof(*chan_info);
2840 
2841 		num_send_chans = min(arg->nallchans, max_chan_limit);
2842 
2843 		arg->nallchans -= num_send_chans;
2844 		len += sizeof(*chan_info) * num_send_chans;
2845 
2846 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2847 		if (!skb)
2848 			return -ENOMEM;
2849 
2850 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2851 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2852 							 sizeof(*cmd));
2853 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2854 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2855 		if (num_sends)
2856 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2857 
2858 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2859 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2860 			   num_send_chans, len, cmd->pdev_id, num_sends);
2861 
2862 		ptr = skb->data + sizeof(*cmd);
2863 
2864 		len = sizeof(*chan_info) * num_send_chans;
2865 		tlv = ptr;
2866 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
2867 						 len);
2868 		ptr += TLV_HDR_SIZE;
2869 
2870 		for (i = 0; i < num_send_chans; ++i) {
2871 			chan_info = ptr;
2872 			memset(chan_info, 0, sizeof(*chan_info));
2873 			len = sizeof(*chan_info);
2874 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2875 								       len);
2876 
2877 			reg1 = &chan_info->reg_info_1;
2878 			reg2 = &chan_info->reg_info_2;
2879 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2880 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2881 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2882 
2883 			if (channel_arg->is_chan_passive)
2884 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2885 			if (channel_arg->allow_he)
2886 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2887 			else if (channel_arg->allow_vht)
2888 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2889 			else if (channel_arg->allow_ht)
2890 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2891 			if (channel_arg->half_rate)
2892 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2893 			if (channel_arg->quarter_rate)
2894 				chan_info->info |=
2895 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2896 
2897 			if (channel_arg->psc_channel)
2898 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2899 
2900 			if (channel_arg->dfs_set)
2901 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2902 
2903 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2904 							    WMI_CHAN_INFO_MODE);
2905 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2906 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2907 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2908 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2909 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2910 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2911 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2912 						  WMI_CHAN_REG_INFO1_REG_CLS);
2913 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2914 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2915 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2916 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2917 
2918 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2919 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2920 				   i, chan_info->mhz, chan_info->info);
2921 
2922 			ptr += sizeof(*chan_info);
2923 
2924 			channel_arg++;
2925 		}
2926 
2927 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2928 		if (ret) {
2929 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2930 			dev_kfree_skb(skb);
2931 			return ret;
2932 		}
2933 
2934 		num_sends++;
2935 	}
2936 
2937 	return 0;
2938 }
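
/* Example usage (sketch): filling one ath12k_wmi_channel_arg entry from
 * a cfg80211 channel before sending the list; an illustrative subset of
 * the fields consumed above.
 *
 *	struct ath12k_wmi_channel_arg *ch = &arg->channel[0];
 *
 *	ch->mhz = channel->center_freq;
 *	ch->cfreq1 = channel->center_freq;
 *	ch->is_chan_passive = !!(channel->flags & IEEE80211_CHAN_NO_IR);
 *	ch->dfs_set = !!(channel->flags & IEEE80211_CHAN_RADAR);
 */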
2939 
2940 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2941 				   struct wmi_wmm_params_all_arg *param)
2942 {
2943 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2944 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2945 	struct wmi_wmm_params *wmm_param;
2946 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2947 	struct sk_buff *skb;
2948 	int ret, ac;
2949 
2950 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2951 	if (!skb)
2952 		return -ENOMEM;
2953 
2954 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2955 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2956 						 sizeof(*cmd));
2957 
2958 	cmd->vdev_id = cpu_to_le32(vdev_id);
2959 	cmd->wmm_param_type = 0;
2960 
2961 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2962 		switch (ac) {
2963 		case WME_AC_BE:
2964 			wmi_wmm_arg = &param->ac_be;
2965 			break;
2966 		case WME_AC_BK:
2967 			wmi_wmm_arg = &param->ac_bk;
2968 			break;
2969 		case WME_AC_VI:
2970 			wmi_wmm_arg = &param->ac_vi;
2971 			break;
2972 		case WME_AC_VO:
2973 			wmi_wmm_arg = &param->ac_vo;
2974 			break;
2975 		}
2976 
2977 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2978 		wmm_param->tlv_header =
2979 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2980 					       sizeof(*wmm_param));
2981 
2982 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2983 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2984 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2985 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2986 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2987 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2988 
2989 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2990 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2991 			   ac, wmm_param->aifs, wmm_param->cwmin,
2992 			   wmm_param->cwmax, wmm_param->txoplimit,
2993 			   wmm_param->acm, wmm_param->no_ack);
2994 	}
2995 	ret = ath12k_wmi_cmd_send(wmi, skb,
2996 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2997 	if (ret) {
2998 		ath12k_warn(ar->ab,
2999 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
3000 		dev_kfree_skb(skb);
3001 	}
3002 
3003 	return ret;
3004 }
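
/* Example usage (sketch): converting mac80211 tx queue parameters into
 * one access class of wmi_wmm_params_all_arg before calling the helper;
 * params is a hypothetical struct ieee80211_tx_queue_params pointer.
 *
 *	struct wmi_wmm_params_arg *p = &all_arg.ac_be;
 *
 *	p->aifs = params->aifs;
 *	p->cwmin = params->cw_min;
 *	p->cwmax = params->cw_max;
 *	p->txop = params->txop;
 *
 *	ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, &all_arg);
 */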
3005 
3006 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
3007 						  u32 pdev_id)
3008 {
3009 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3010 	struct wmi_dfs_phyerr_offload_cmd *cmd;
3011 	struct sk_buff *skb;
3012 	int ret;
3013 
3014 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3015 	if (!skb)
3016 		return -ENOMEM;
3017 
3018 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
3019 	cmd->tlv_header =
3020 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
3021 				       sizeof(*cmd));
3022 
3023 	cmd->pdev_id = cpu_to_le32(pdev_id);
3024 
3025 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3026 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
3027 
3028 	ret = ath12k_wmi_cmd_send(wmi, skb,
3029 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
3030 	if (ret) {
3031 		ath12k_warn(ar->ab,
3032 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
3033 		dev_kfree_skb(skb);
3034 	}
3035 
3036 	return ret;
3037 }
3038 
3039 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
3040 			    const u8 *buf, size_t buf_len)
3041 {
3042 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3043 	struct wmi_pdev_set_bios_interface_cmd *cmd;
3044 	struct wmi_tlv *tlv;
3045 	struct sk_buff *skb;
3046 	u8 *ptr;
3047 	u32 len, len_aligned;
3048 	int ret;
3049 
3050 	len_aligned = roundup(buf_len, sizeof(u32));
3051 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
3052 
3053 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3054 	if (!skb)
3055 		return -ENOMEM;
3056 
3057 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3058 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3059 						 sizeof(*cmd));
3060 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3061 	cmd->param_type_id = cpu_to_le32(param_id);
3062 	cmd->length = cpu_to_le32(buf_len);
3063 
3064 	ptr = skb->data + sizeof(*cmd);
3065 	tlv = (struct wmi_tlv *)ptr;
3066 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3067 	ptr += TLV_HDR_SIZE;
3068 	memcpy(ptr, buf, buf_len);
3069 
3070 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3071 				  skb,
3072 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3073 	if (ret) {
3074 		ath12k_warn(ab,
3075 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3076 			    param_id, ret);
3077 		dev_kfree_skb(skb);
3078 	}
3079 
3080 	return ret;
3081 }
3082 
3083 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3084 {
3085 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3086 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3087 	struct wmi_tlv *tlv;
3088 	struct sk_buff *skb;
3089 	int ret;
3090 	u8 *buf_ptr;
3091 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3092 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3093 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3094 
3095 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3096 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3097 					      sizeof(u32));
3098 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3099 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3100 
3101 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3102 	if (!skb)
3103 		return -ENOMEM;
3104 
3105 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3106 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3107 						 sizeof(*cmd));
3108 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3109 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3110 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3111 
3112 	buf_ptr = skb->data + sizeof(*cmd);
3113 	tlv = (struct wmi_tlv *)buf_ptr;
3114 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3115 					 sar_table_len_aligned);
3116 	buf_ptr += TLV_HDR_SIZE;
3117 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3118 
3119 	buf_ptr += sar_table_len_aligned;
3120 	tlv = (struct wmi_tlv *)buf_ptr;
3121 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3122 					 sar_dbs_backoff_len_aligned);
3123 	buf_ptr += TLV_HDR_SIZE;
3124 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3125 
3126 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3127 				  skb,
3128 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3129 	if (ret) {
3130 		ath12k_warn(ab,
3131 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3132 			    ret);
3133 		dev_kfree_skb(skb);
3134 	}
3135 
3136 	return ret;
3137 }
3138 
3139 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3140 {
3141 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3142 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3143 	struct wmi_tlv *tlv;
3144 	struct sk_buff *skb;
3145 	int ret;
3146 	u8 *buf_ptr;
3147 	u32 len, sar_geo_len_aligned;
3148 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3149 
3150 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3151 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3152 
3153 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3154 	if (!skb)
3155 		return -ENOMEM;
3156 
3157 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3158 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3159 						 sizeof(*cmd));
3160 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3161 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3162 
3163 	buf_ptr = skb->data + sizeof(*cmd);
3164 	tlv = (struct wmi_tlv *)buf_ptr;
3165 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3166 	buf_ptr += TLV_HDR_SIZE;
3167 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3168 
3169 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3170 				  skb,
3171 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3172 	if (ret) {
3173 		ath12k_warn(ab,
3174 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3175 			    ret);
3176 		dev_kfree_skb(skb);
3177 	}
3178 
3179 	return ret;
3180 }
3181 
3182 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3183 			  u32 tid, u32 initiator, u32 reason)
3184 {
3185 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3186 	struct wmi_delba_send_cmd *cmd;
3187 	struct sk_buff *skb;
3188 	int ret;
3189 
3190 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3191 	if (!skb)
3192 		return -ENOMEM;
3193 
3194 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3195 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3196 						 sizeof(*cmd));
3197 	cmd->vdev_id = cpu_to_le32(vdev_id);
3198 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3199 	cmd->tid = cpu_to_le32(tid);
3200 	cmd->initiator = cpu_to_le32(initiator);
3201 	cmd->reasoncode = cpu_to_le32(reason);
3202 
3203 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3204 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3205 		   vdev_id, mac, tid, initiator, reason);
3206 
3207 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3208 
3209 	if (ret) {
3210 		ath12k_warn(ar->ab,
3211 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3212 		dev_kfree_skb(skb);
3213 	}
3214 
3215 	return ret;
3216 }
3217 
3218 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3219 			      u32 tid, u32 status)
3220 {
3221 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3222 	struct wmi_addba_setresponse_cmd *cmd;
3223 	struct sk_buff *skb;
3224 	int ret;
3225 
3226 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3227 	if (!skb)
3228 		return -ENOMEM;
3229 
3230 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3231 	cmd->tlv_header =
3232 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3233 				       sizeof(*cmd));
3234 	cmd->vdev_id = cpu_to_le32(vdev_id);
3235 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3236 	cmd->tid = cpu_to_le32(tid);
3237 	cmd->statuscode = cpu_to_le32(status);
3238 
3239 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3240 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3241 		   vdev_id, mac, tid, status);
3242 
3243 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3244 
3245 	if (ret) {
3246 		ath12k_warn(ar->ab,
3247 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3248 		dev_kfree_skb(skb);
3249 	}
3250 
3251 	return ret;
3252 }
3253 
3254 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3255 			  u32 tid, u32 buf_size)
3256 {
3257 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3258 	struct wmi_addba_send_cmd *cmd;
3259 	struct sk_buff *skb;
3260 	int ret;
3261 
3262 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3263 	if (!skb)
3264 		return -ENOMEM;
3265 
3266 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3267 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3268 						 sizeof(*cmd));
3269 	cmd->vdev_id = cpu_to_le32(vdev_id);
3270 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3271 	cmd->tid = cpu_to_le32(tid);
3272 	cmd->buffersize = cpu_to_le32(buf_size);
3273 
3274 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3275 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3276 		   vdev_id, mac, tid, buf_size);
3277 
3278 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3279 
3280 	if (ret) {
3281 		ath12k_warn(ar->ab,
3282 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3283 		dev_kfree_skb(skb);
3284 	}
3285 
3286 	return ret;
3287 }
3288 
3289 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3290 {
3291 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3292 	struct wmi_addba_clear_resp_cmd *cmd;
3293 	struct sk_buff *skb;
3294 	int ret;
3295 
3296 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3297 	if (!skb)
3298 		return -ENOMEM;
3299 
3300 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3301 	cmd->tlv_header =
3302 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3303 				       sizeof(*cmd));
3304 	cmd->vdev_id = cpu_to_le32(vdev_id);
3305 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3306 
3307 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3308 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3309 		   vdev_id, mac);
3310 
3311 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3312 
3313 	if (ret) {
3314 		ath12k_warn(ar->ab,
3315 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3316 		dev_kfree_skb(skb);
3317 	}
3318 
3319 	return ret;
3320 }
3321 
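/* Program the initial country setting for a pdev. arg->flags selects
 * exactly one of three encodings (alpha2 string, numeric country code
 * or regdomain id) and thus which cc_info union member is valid.
 */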
3322 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3323 				     struct ath12k_wmi_init_country_arg *arg)
3324 {
3325 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3326 	struct wmi_init_country_cmd *cmd;
3327 	struct sk_buff *skb;
3328 	int ret;
3329 
3330 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3331 	if (!skb)
3332 		return -ENOMEM;
3333 
3334 	cmd = (struct wmi_init_country_cmd *)skb->data;
3335 	cmd->tlv_header =
3336 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3337 				       sizeof(*cmd));
3338 
3339 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3340 
3341 	switch (arg->flags) {
3342 	case ALPHA_IS_SET:
3343 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3344 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3345 		break;
3346 	case CC_IS_SET:
3347 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3348 		cmd->cc_info.country_code =
3349 			cpu_to_le32(arg->cc_info.country_code);
3350 		break;
3351 	case REGDMN_IS_SET:
3352 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3353 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3354 		break;
3355 	default:
3356 		ret = -EINVAL;
3357 		goto out;
3358 	}
3359 
3360 	ret = ath12k_wmi_cmd_send(wmi, skb,
3361 				  WMI_SET_INIT_COUNTRY_CMDID);
3362 
3363 out:
3364 	if (ret) {
3365 		ath12k_warn(ar->ab,
3366 			    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
3367 			    ret);
3368 		dev_kfree_skb(skb);
3369 	}
3370 
3371 	return ret;
3372 }
3373 
3374 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3375 					    struct wmi_set_current_country_arg *arg)
3376 {
3377 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3378 	struct wmi_set_current_country_cmd *cmd;
3379 	struct sk_buff *skb;
3380 	int ret;
3381 
3382 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3383 	if (!skb)
3384 		return -ENOMEM;
3385 
3386 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3387 	cmd->tlv_header =
3388 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3389 				       sizeof(*cmd));
3390 
3391 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3392 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3393 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3394 
3395 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3396 		   "set current country pdev id %d alpha2 %c%c\n",
3397 		   ar->pdev->pdev_id,
3398 		   arg->alpha2[0],
3399 		   arg->alpha2[1]);
3400 
3401 	if (ret) {
3402 		ath12k_warn(ar->ab,
3403 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3404 		dev_kfree_skb(skb);
3405 	}
3406 
3407 	return ret;
3408 }
3409 
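/* Start the firmware 802.11d scan on a vdev, used for automatic
 * country detection from received beacons.
 */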
3410 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3411 				       struct wmi_11d_scan_start_arg *arg)
3412 {
3413 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3414 	struct wmi_11d_scan_start_cmd *cmd;
3415 	struct sk_buff *skb;
3416 	int ret;
3417 
3418 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3419 	if (!skb)
3420 		return -ENOMEM;
3421 
3422 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3423 	cmd->tlv_header =
3424 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3425 				       sizeof(*cmd));
3426 
3427 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3428 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3429 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3430 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3431 
3432 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3433 		   "send 11d scan start vdev id %d period %d ms internal %d ms\n",
3434 		   arg->vdev_id, arg->scan_period_msec,
3435 		   arg->start_interval_msec);
3436 
3437 	if (ret) {
3438 		ath12k_warn(ar->ab,
3439 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3440 		dev_kfree_skb(skb);
3441 	}
3442 
3443 	return ret;
3444 }
3445 
3446 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
3447 {
3448 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3449 	struct wmi_11d_scan_stop_cmd *cmd;
3450 	struct sk_buff *skb;
3451 	int ret;
3452 
3453 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3454 	if (!skb)
3455 		return -ENOMEM;
3456 
3457 	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
3458 	cmd->tlv_header =
3459 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
3460 				       sizeof(*cmd));
3461 
3462 	cmd->vdev_id = cpu_to_le32(vdev_id);
3463 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
3464 
3465 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3466 		   "send 11d scan stop vdev id %d\n",
3467 		   vdev_id);
3468 
3469 	if (ret) {
3470 		ath12k_warn(ar->ab,
3471 			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3472 		dev_kfree_skb(skb);
3473 	}
3474 
3475 	return ret;
3476 }
3477 
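/* Enable target wake time (TWT) on a pdev. Apart from the pdev id,
 * every tunable is filled from the ATH12K_TWT_DEF_* defaults; MBSSID
 * support is not wired up yet (mbss_support stays 0).
 */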
3478 int
3479 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3480 {
3481 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3482 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3483 	struct wmi_twt_enable_params_cmd *cmd;
3484 	struct sk_buff *skb;
3485 	int ret, len;
3486 
3487 	len = sizeof(*cmd);
3488 
3489 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3490 	if (!skb)
3491 		return -ENOMEM;
3492 
3493 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3494 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3495 						 len);
3496 	cmd->pdev_id = cpu_to_le32(pdev_id);
3497 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3498 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3499 	cmd->congestion_thresh_setup =
3500 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3501 	cmd->congestion_thresh_teardown =
3502 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3503 	cmd->congestion_thresh_critical =
3504 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3505 	cmd->interference_thresh_teardown =
3506 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3507 	cmd->interference_thresh_setup =
3508 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3509 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3510 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3511 	cmd->no_of_bcast_mcast_slots =
3512 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3513 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3514 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3515 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3516 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3517 	cmd->remove_sta_slot_interval =
3518 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3519 	/* TODO add MBSSID support */
3520 	cmd->mbss_support = 0;
3521 
3522 	ret = ath12k_wmi_cmd_send(wmi, skb,
3523 				  WMI_TWT_ENABLE_CMDID);
3524 	if (ret) {
3525 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
3526 		dev_kfree_skb(skb);
3527 	}
3528 	return ret;
3529 }
3530 
3531 int
3532 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3533 {
3534 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3535 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3536 	struct wmi_twt_disable_params_cmd *cmd;
3537 	struct sk_buff *skb;
3538 	int ret, len;
3539 
3540 	len = sizeof(*cmd);
3541 
3542 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3543 	if (!skb)
3544 		return -ENOMEM;
3545 
3546 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3547 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3548 						 len);
3549 	cmd->pdev_id = cpu_to_le32(pdev_id);
3550 
3551 	ret = ath12k_wmi_cmd_send(wmi, skb,
3552 				  WMI_TWT_DISABLE_CMDID);
3553 	if (ret) {
3554 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
3555 		dev_kfree_skb(skb);
3556 	}
3557 	return ret;
3558 }
3559 
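/* Program OBSS packet-detect (spatial reuse) thresholds for a vdev
 * from the mac80211 ieee80211_he_obss_pd parameters. The min/max
 * offsets are signed, hence the a_cpu_to_sle32() conversions.
 */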
3560 int
3561 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3562 			     struct ieee80211_he_obss_pd *he_obss_pd)
3563 {
3564 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3565 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3566 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3567 	struct sk_buff *skb;
3568 	int ret, len;
3569 
3570 	len = sizeof(*cmd);
3571 
3572 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3573 	if (!skb)
3574 		return -ENOMEM;
3575 
3576 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3577 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3578 						 len);
3579 	cmd->vdev_id = cpu_to_le32(vdev_id);
3580 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3581 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3582 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3583 
3584 	ret = ath12k_wmi_cmd_send(wmi, skb,
3585 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3586 	if (ret) {
3587 		ath12k_warn(ab,
3588 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
3589 		dev_kfree_skb(skb);
3590 	}
3591 	return ret;
3592 }
3593 
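/* Configure BSS color collision detection on a vdev; @enable selects
 * between the detection and detection-disable event types while the
 * detection and scan periods stay the same.
 */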
3594 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3595 				  u8 bss_color, u32 period,
3596 				  bool enable)
3597 {
3598 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3599 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3600 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3601 	struct sk_buff *skb;
3602 	int ret, len;
3603 
3604 	len = sizeof(*cmd);
3605 
3606 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3607 	if (!skb)
3608 		return -ENOMEM;
3609 
3610 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3611 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3612 						 len);
3613 	cmd->vdev_id = cpu_to_le32(vdev_id);
3614 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3615 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3616 	cmd->current_bss_color = cpu_to_le32(bss_color);
3617 	cmd->detection_period_ms = cpu_to_le32(period);
3618 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3619 	cmd->free_slot_expiry_time_ms = 0;
3620 	cmd->flags = 0;
3621 
3622 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3623 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3624 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color, period,
3625 		   ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3626 
3627 	ret = ath12k_wmi_cmd_send(wmi, skb,
3628 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3629 	if (ret) {
3630 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
3631 		dev_kfree_skb(skb);
3632 	}
3633 	return ret;
3634 }
3635 
3636 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3637 						bool enable)
3638 {
3639 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3640 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3641 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3642 	struct sk_buff *skb;
3643 	int ret, len;
3644 
3645 	len = sizeof(*cmd);
3646 
3647 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3648 	if (!skb)
3649 		return -ENOMEM;
3650 
3651 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3652 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3653 						 len);
3654 	cmd->vdev_id = cpu_to_le32(vdev_id);
3655 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3656 
3657 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3658 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3659 		   vdev_id, enable);
3660 
3661 	ret = ath12k_wmi_cmd_send(wmi, skb,
3662 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3663 	if (ret) {
3664 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
3665 		dev_kfree_skb(skb);
3666 	}
3667 	return ret;
3668 }
3669 
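/* Send the FILS discovery frame template. The layout is the fixed
 * command struct followed by a byte-array TLV whose length is rounded
 * up to a 4-byte multiple; buf_len carries the real template length.
 */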
3670 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3671 				   struct sk_buff *tmpl)
3672 {
3673 	struct wmi_tlv *tlv;
3674 	struct sk_buff *skb;
3675 	void *ptr;
3676 	int ret, len;
3677 	size_t aligned_len;
3678 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3679 
3680 	aligned_len = roundup(tmpl->len, 4);
3681 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3682 
3683 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3684 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3685 
3686 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3687 	if (!skb)
3688 		return -ENOMEM;
3689 
3690 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3691 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3692 						 sizeof(*cmd));
3693 	cmd->vdev_id = cpu_to_le32(vdev_id);
3694 	cmd->buf_len = cpu_to_le32(tmpl->len);
3695 	ptr = skb->data + sizeof(*cmd);
3696 
3697 	tlv = ptr;
3698 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3699 	memcpy(tlv->value, tmpl->data, tmpl->len);
3700 
3701 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3702 	if (ret) {
3703 		ath12k_warn(ar->ab,
3704 			    "WMI vdev %i failed to send FILS discovery template command\n",
3705 			    vdev_id);
3706 		dev_kfree_skb(skb);
3707 	}
3708 	return ret;
3709 }
3710 
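/* Send the probe response template. Unlike the FILS template, the
 * frame byte-array TLV is preceded by a bcn_prb_info TLV whose
 * caps/erp fields are left zeroed.
 */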
3711 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3712 			       struct sk_buff *tmpl)
3713 {
3714 	struct wmi_probe_tmpl_cmd *cmd;
3715 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3716 	struct wmi_tlv *tlv;
3717 	struct sk_buff *skb;
3718 	void *ptr;
3719 	int ret, len;
3720 	size_t aligned_len = roundup(tmpl->len, 4);
3721 
3722 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3723 		   "WMI vdev %i set probe response template\n", vdev_id);
3724 
3725 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3726 
3727 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3728 	if (!skb)
3729 		return -ENOMEM;
3730 
3731 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3732 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3733 						 sizeof(*cmd));
3734 	cmd->vdev_id = cpu_to_le32(vdev_id);
3735 	cmd->buf_len = cpu_to_le32(tmpl->len);
3736 
3737 	ptr = skb->data + sizeof(*cmd);
3738 
3739 	probe_info = ptr;
3740 	len = sizeof(*probe_info);
3741 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3742 							len);
3743 	probe_info->caps = 0;
3744 	probe_info->erp = 0;
3745 
3746 	ptr += sizeof(*probe_info);
3747 
3748 	tlv = ptr;
3749 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3750 	memcpy(tlv->value, tmpl->data, tmpl->len);
3751 
3752 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3753 	if (ret) {
3754 		ath12k_warn(ar->ab,
3755 			    "WMI vdev %i failed to send probe response template command\n",
3756 			    vdev_id);
3757 		dev_kfree_skb(skb);
3758 	}
3759 	return ret;
3760 }
3761 
3762 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3763 			      bool unsol_bcast_probe_resp_enabled)
3764 {
3765 	struct sk_buff *skb;
3766 	int ret, len;
3767 	struct wmi_fils_discovery_cmd *cmd;
3768 
3769 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3770 		   "WMI vdev %i set %s interval to %u TU\n",
3771 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3772 		   "unsolicited broadcast probe response" : "FILS discovery",
3773 		   interval);
3774 
3775 	len = sizeof(*cmd);
3776 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3777 	if (!skb)
3778 		return -ENOMEM;
3779 
3780 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3781 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3782 						 len);
3783 	cmd->vdev_id = cpu_to_le32(vdev_id);
3784 	cmd->interval = cpu_to_le32(interval);
3785 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3786 
3787 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3788 	if (ret) {
3789 		ath12k_warn(ar->ab,
3790 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3791 			    vdev_id);
3792 		dev_kfree_skb(skb);
3793 	}
3794 	return ret;
3795 }
3796 
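/* Derive per-pdev band-to-MAC frequency ranges from the HAL register
 * capabilities reported by firmware; dual-band pdevs span from the
 * 2 GHz low edge up to the 5 GHz high edge.
 */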
3797 static void
3798 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3799 			      struct ath12k_wmi_pdev_band_arg *arg)
3800 {
3801 	u8 i;
3802 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3803 	struct ath12k_pdev *pdev;
3804 
3805 	for (i = 0; i < soc->num_radios; i++) {
3806 		pdev = &soc->pdevs[i];
3807 		hal_reg_cap = &soc->hal_reg_cap[i];
3808 		arg[i].pdev_id = pdev->pdev_id;
3809 
3810 		switch (pdev->cap.supported_bands) {
3811 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
3812 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3813 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3814 			break;
3815 		case WMI_HOST_WLAN_2GHZ_CAP:
3816 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3817 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3818 			break;
3819 		case WMI_HOST_WLAN_5GHZ_CAP:
3820 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3821 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3822 			break;
3823 		default:
3824 			break;
3825 		}
3826 	}
3827 }
3828 
3829 static void
3830 ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
3831 				struct ath12k_wmi_resource_config_params *wmi_cfg,
3832 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3833 {
3834 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3835 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3836 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3837 	wmi_cfg->num_offload_reorder_buffs =
3838 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3839 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3840 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3841 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3842 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3843 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3844 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3845 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3846 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3847 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3848 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3849 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3850 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3851 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3852 	wmi_cfg->roam_offload_max_ap_profiles =
3853 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3854 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3855 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3856 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3857 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3858 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3859 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3860 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3861 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3862 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3863 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3864 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3865 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3866 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3867 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3868 	wmi_cfg->num_tdls_conn_table_entries =
3869 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3870 	wmi_cfg->beacon_tx_offload_max_vdev =
3871 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3872 	wmi_cfg->num_multicast_filter_entries =
3873 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3874 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3875 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3876 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3877 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3878 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3879 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3880 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3881 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3882 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3883 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3884 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3885 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3886 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3887 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3888 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3889 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
3890 				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
3891 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3892 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3893 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3894 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3895 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3896 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3897 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3898 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3899 	if (ab->hw_params->reoq_lut_support)
3900 		wmi_cfg->host_service_flags |=
3901 			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
3902 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3903 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3904 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3905 }
3906 
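/* Build and send WMI_INIT_CMDID. The TLV stream is: the fixed init
 * command, the resource config, the host memory chunk array (whose
 * TLV header is emitted even when the array is empty) and, unless
 * hw_mode_id is WMI_HOST_HW_MODE_MAX, a set-hw-mode struct plus a
 * band-to-MAC array.
 */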
3907 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3908 				struct ath12k_wmi_init_cmd_arg *arg)
3909 {
3910 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3911 	struct sk_buff *skb;
3912 	struct wmi_init_cmd *cmd;
3913 	struct ath12k_wmi_resource_config_params *cfg;
3914 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3915 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3916 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3917 	struct wmi_tlv *tlv;
3918 	int ret, len;
3919 	void *ptr;
3920 	u32 hw_mode_len = 0;
3921 	u16 idx;
3922 
3923 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3924 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3925 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3926 
3927 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3928 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3929 
3930 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3931 	if (!skb)
3932 		return -ENOMEM;
3933 
3934 	cmd = (struct wmi_init_cmd *)skb->data;
3935 
3936 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3937 						 sizeof(*cmd));
3938 
3939 	ptr = skb->data + sizeof(*cmd);
3940 	cfg = ptr;
3941 
3942 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
3943 
3944 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3945 						 sizeof(*cfg));
3946 
3947 	ptr += sizeof(*cfg);
3948 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3949 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3950 
3951 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3952 		host_mem_chunks[idx].tlv_header =
3953 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3954 					   len);
3955 
3956 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3957 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3958 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3959 
3960 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3961 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3962 			   arg->mem_chunks[idx].req_id,
3963 			   (u64)arg->mem_chunks[idx].paddr,
3964 			   arg->mem_chunks[idx].len);
3965 	}
3966 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3967 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3968 
3969 	/* TLV header for the chunk array; len is zero when num_mem_chunks is zero */
3970 	tlv = ptr;
3971 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3972 	ptr += TLV_HDR_SIZE + len;
3973 
3974 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3975 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3976 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3977 							     sizeof(*hw_mode));
3978 
3979 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3980 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3981 
3982 		ptr += sizeof(*hw_mode);
3983 
3984 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3985 		tlv = ptr;
3986 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3987 
3988 		ptr += TLV_HDR_SIZE;
3989 		len = sizeof(*band_to_mac);
3990 
3991 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3992 			band_to_mac = (void *)ptr;
3993 
3994 			band_to_mac->tlv_header =
3995 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3996 						       len);
3997 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3998 			band_to_mac->start_freq =
3999 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
4000 			band_to_mac->end_freq =
4001 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
4002 			ptr += sizeof(*band_to_mac);
4003 		}
4004 	}
4005 
4006 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
4007 	if (ret) {
4008 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
4009 		dev_kfree_skb(skb);
4010 	}
4011 
4012 	return ret;
4013 }
4014 
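/* Configure LRO for a pdev; the th_4/th_6 hash seed arrays are filled
 * with random bytes before the command is sent.
 */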
4015 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
4016 			    int pdev_id)
4017 {
4018 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
4019 	struct sk_buff *skb;
4020 	int ret;
4021 
4022 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4023 	if (!skb)
4024 		return -ENOMEM;
4025 
4026 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
4027 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
4028 						 sizeof(*cmd));
4029 
4030 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
4031 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
4032 
4033 	cmd->pdev_id = cpu_to_le32(pdev_id);
4034 
4035 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4036 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4037 
4038 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4039 	if (ret) {
4040 		ath12k_warn(ar->ab,
4041 			    "failed to send lro cfg req wmi cmd\n");
4042 		goto err;
4043 	}
4044 
4045 	return 0;
4046 err:
4047 	dev_kfree_skb(skb);
4048 	return ret;
4049 }
4050 
4051 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4052 {
4053 	unsigned long time_left;
4054 
4055 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4056 						WMI_SERVICE_READY_TIMEOUT_HZ);
4057 	if (!time_left)
4058 		return -ETIMEDOUT;
4059 
4060 	return 0;
4061 }
4062 
4063 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4064 {
4065 	unsigned long time_left;
4066 
4067 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4068 						WMI_SERVICE_READY_TIMEOUT_HZ);
4069 	if (!time_left)
4070 		return -ETIMEDOUT;
4071 
4072 	return 0;
4073 }
4074 
4075 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4076 			   enum wmi_host_hw_mode_config_type mode)
4077 {
4078 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4079 	struct sk_buff *skb;
4080 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4081 	int len;
4082 	int ret;
4083 
4084 	len = sizeof(*cmd);
4085 
4086 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4087 	if (!skb)
4088 		return -ENOMEM;
4089 
4090 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4091 
4092 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4093 						 sizeof(*cmd));
4094 
4095 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
4096 	cmd->hw_mode_index = cpu_to_le32(mode);
4097 
4098 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4099 	if (ret) {
4100 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4101 		dev_kfree_skb(skb);
4102 	}
4103 
4104 	return ret;
4105 }
4106 
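/* Assemble the WMI init arguments from hw_params and the service
 * bitmap, then send WMI_INIT_CMDID. single_pdev_only targets pass
 * WMI_HOST_HW_MODE_MAX so that no hw-mode TLV is emitted.
 */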
4107 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
4108 {
4109 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4110 	struct ath12k_wmi_init_cmd_arg arg = {};
4111 
4112 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
4113 		     ab->wmi_ab.svc_map))
4114 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
4115 
4116 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
4117 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
4118 
4119 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
4120 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
4121 	arg.mem_chunks = wmi_ab->mem_chunks;
4122 
4123 	if (ab->hw_params->single_pdev_only)
4124 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4125 
4126 	arg.num_band_to_mac = ab->num_radios;
4127 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
4128 
4129 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
4130 
4131 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
4132 }
4133 
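/* Push a complete spectral scan configuration for a vdev; each scan_*
 * knob in the arg struct maps 1:1 onto a command field.
 */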
4134 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
4135 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
4136 {
4137 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
4138 	struct sk_buff *skb;
4139 	int ret;
4140 
4141 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4142 	if (!skb)
4143 		return -ENOMEM;
4144 
4145 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
4146 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
4147 						 sizeof(*cmd));
4148 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
4149 	cmd->scan_count = cpu_to_le32(arg->scan_count);
4150 	cmd->scan_period = cpu_to_le32(arg->scan_period);
4151 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
4152 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
4153 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
4154 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
4155 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
4156 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
4157 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
4158 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
4159 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
4160 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
4161 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
4162 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
4163 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
4164 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
4165 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
4166 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4167 
4168 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4169 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4170 		   arg->vdev_id);
4171 
4172 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4173 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4174 	if (ret) {
4175 		ath12k_warn(ar->ab,
4176 			    "failed to send spectral scan config wmi cmd\n");
4177 		goto err;
4178 	}
4179 
4180 	return 0;
4181 err:
4182 	dev_kfree_skb(skb);
4183 	return ret;
4184 }
4185 
4186 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4187 				    u32 trigger, u32 enable)
4188 {
4189 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4190 	struct sk_buff *skb;
4191 	int ret;
4192 
4193 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4194 	if (!skb)
4195 		return -ENOMEM;
4196 
4197 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4198 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4199 						 sizeof(*cmd));
4200 
4201 	cmd->vdev_id = cpu_to_le32(vdev_id);
4202 	cmd->trigger_cmd = cpu_to_le32(trigger);
4203 	cmd->enable_cmd = cpu_to_le32(enable);
4204 
4205 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4206 		   "WMI spectral enable cmd vdev id 0x%x\n",
4207 		   vdev_id);
4208 
4209 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4210 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4211 	if (ret) {
4212 		ath12k_warn(ar->ab,
4213 			    "failed to send spectral enable wmi cmd\n");
4214 		goto err;
4215 	}
4216 
4217 	return 0;
4218 err:
4219 	dev_kfree_skb(skb);
4220 	return ret;
4221 }
4222 
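/* Configure a firmware direct-buffer DMA ring; base, head index and
 * tail index addresses are passed as lo/hi halves of the 64-bit
 * physical addresses.
 */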
4223 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4224 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4225 {
4226 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4227 	struct sk_buff *skb;
4228 	int ret;
4229 
4230 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4231 	if (!skb)
4232 		return -ENOMEM;
4233 
4234 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4235 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4236 						 sizeof(*cmd));
4237 
4238 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4239 	cmd->module_id = cpu_to_le32(arg->module_id);
4240 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4241 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4242 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4243 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4244 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4245 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4246 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4247 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4248 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4249 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4250 
4251 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4252 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4253 		   arg->pdev_id);
4254 
4255 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4256 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4257 	if (ret) {
4258 		ath12k_warn(ar->ab,
4259 			    "failed to send dma ring cfg req wmi cmd\n");
4260 		goto err;
4261 	}
4262 
4263 	return 0;
4264 err:
4265 	dev_kfree_skb(skb);
4266 	return ret;
4267 }
4268 
4269 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4270 					  u16 tag, u16 len,
4271 					  const void *ptr, void *data)
4272 {
4273 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4274 
4275 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4276 		return -EPROTO;
4277 
4278 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4279 		return -ENOBUFS;
4280 
4281 	arg->num_buf_entry++;
4282 	return 0;
4283 }
4284 
4285 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4286 					 u16 tag, u16 len,
4287 					 const void *ptr, void *data)
4288 {
4289 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4290 
4291 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4292 		return -EPROTO;
4293 
4294 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4295 		return -ENOBUFS;
4296 
4297 	arg->num_meta++;
4298 
4299 	return 0;
4300 }
4301 
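/* Top-level parser for the DMA buffer release event. TLVs arrive in a
 * fixed order: the fixed params, then an array of buffer entries,
 * then an array of spectral meta data; the *_done flags track which
 * WMI_TAG_ARRAY_STRUCT instance is currently being visited.
 */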
4302 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4303 				    u16 tag, u16 len,
4304 				    const void *ptr, void *data)
4305 {
4306 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4307 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4308 	u32 pdev_id;
4309 	int ret;
4310 
4311 	switch (tag) {
4312 	case WMI_TAG_DMA_BUF_RELEASE:
4313 		fixed = ptr;
4314 		arg->fixed = *fixed;
4315 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4316 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4317 		break;
4318 	case WMI_TAG_ARRAY_STRUCT:
4319 		if (!arg->buf_entry_done) {
4320 			arg->num_buf_entry = 0;
4321 			arg->buf_entry = ptr;
4322 
4323 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4324 						  ath12k_wmi_dma_buf_entry_parse,
4325 						  arg);
4326 			if (ret) {
4327 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4328 					    ret);
4329 				return ret;
4330 			}
4331 
4332 			arg->buf_entry_done = true;
4333 		} else if (!arg->meta_data_done) {
4334 			arg->num_meta = 0;
4335 			arg->meta_data = ptr;
4336 
4337 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4338 						  ath12k_wmi_dma_buf_meta_parse,
4339 						  arg);
4340 			if (ret) {
4341 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4342 					    ret);
4343 				return ret;
4344 			}
4345 
4346 			arg->meta_data_done = true;
4347 		}
4348 		break;
4349 	default:
4350 		break;
4351 	}
4352 	return 0;
4353 }
4354 
4355 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4356 						       struct sk_buff *skb)
4357 {
4358 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4359 	struct ath12k_dbring_buf_release_event param;
4360 	int ret;
4361 
4362 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4363 				  ath12k_wmi_dma_buf_parse,
4364 				  &arg);
4365 	if (ret) {
4366 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4367 		return;
4368 	}
4369 
4370 	param.fixed = arg.fixed;
4371 	param.buf_entry = arg.buf_entry;
4372 	param.num_buf_entry = arg.num_buf_entry;
4373 	param.meta_data = arg.meta_data;
4374 	param.num_meta = arg.num_meta;
4375 
4376 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4377 	if (ret) {
4378 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4379 		return;
4380 	}
4381 }
4382 
4383 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4384 					 u16 tag, u16 len,
4385 					 const void *ptr, void *data)
4386 {
4387 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4388 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4389 	u32 phy_map = 0;
4390 
4391 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4392 		return -EPROTO;
4393 
4394 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4395 		return -ENOBUFS;
4396 
4397 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4398 				   hw_mode_id);
4399 	svc_rdy_ext->n_hw_mode_caps++;
4400 
4401 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4402 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4403 
4404 	return 0;
4405 }
4406 
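/* Walk the hw mode capability array and latch the preferred hw mode:
 * the mode with the lowest ath12k_hw_mode_pri_map[] value wins.
 */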
4407 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4408 				   u16 len, const void *ptr, void *data)
4409 {
4410 	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
4411 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4412 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4413 	enum wmi_host_hw_mode_config_type mode, pref;
4414 	u32 i;
4415 	int ret;
4416 
4417 	svc_rdy_ext->n_hw_mode_caps = 0;
4418 	svc_rdy_ext->hw_mode_caps = ptr;
4419 
4420 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4421 				  ath12k_wmi_hw_mode_caps_parse,
4422 				  svc_rdy_ext);
4423 	if (ret) {
4424 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4425 		return ret;
4426 	}
4427 
4428 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4429 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4430 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4431 
4432 		if (mode >= WMI_HOST_HW_MODE_MAX)
4433 			continue;
4434 
4435 		pref = soc->wmi_ab.preferred_hw_mode;
4436 
4437 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4438 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4439 			soc->wmi_ab.preferred_hw_mode = mode;
4440 		}
4441 	}
4442 
4443 	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
4444 
4445 	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
4446 		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
4447 
4448 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4449 		return -EINVAL;
4450 
4451 	return 0;
4452 }
4453 
4454 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4455 					 u16 tag, u16 len,
4456 					 const void *ptr, void *data)
4457 {
4458 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4459 
4460 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4461 		return -EPROTO;
4462 
4463 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4464 		return -ENOBUFS;
4465 
4466 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4467 	if (!svc_rdy_ext->n_mac_phy_caps) {
4468 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
4469 						    GFP_ATOMIC);
4470 		if (!svc_rdy_ext->mac_phy_caps)
4471 			return -ENOMEM;
4472 	}
4473 
4474 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4475 	svc_rdy_ext->n_mac_phy_caps++;
4476 	return 0;
4477 }
4478 
4479 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4480 					     u16 tag, u16 len,
4481 					     const void *ptr, void *data)
4482 {
4483 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4484 
4485 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4486 		return -EPROTO;
4487 
4488 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4489 		return -ENOBUFS;
4490 
4491 	svc_rdy_ext->n_ext_hal_reg_caps++;
4492 	return 0;
4493 }
4494 
4495 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4496 				       u16 len, const void *ptr, void *data)
4497 {
4498 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4499 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4500 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4501 	int ret;
4502 	u32 i;
4503 
4504 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4505 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4506 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4507 				  ath12k_wmi_ext_hal_reg_caps_parse,
4508 				  svc_rdy_ext);
4509 	if (ret) {
4510 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4511 		return ret;
4512 	}
4513 
4514 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4515 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4516 						      svc_rdy_ext->soc_hal_reg_caps,
4517 						      svc_rdy_ext->ext_hal_reg_caps, i,
4518 						      &reg_cap);
4519 		if (ret) {
4520 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4521 			return ret;
4522 		}
4523 
4524 		if (reg_cap.phy_id >= MAX_RADIOS) {
4525 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4526 			return -EINVAL;
4527 		}
4528 
4529 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4530 	}
4531 	return 0;
4532 }
4533 
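/* Enumerate the PHYs of the preferred hw mode (one bit per PHY in
 * phy_id_map) and pull the MAC/PHY caps of each into a pdev. For
 * single_pdev_only targets all caps are folded into pdev 0.
 */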
4534 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4535 						 u16 len, const void *ptr,
4536 						 void *data)
4537 {
4538 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4539 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4540 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4541 	u32 phy_id_map;
4542 	int pdev_index = 0;
4543 	int ret;
4544 
4545 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4546 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4547 
4548 	soc->num_radios = 0;
4549 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4550 	soc->fw_pdev_count = 0;
4551 
4552 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4553 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4554 							    svc_rdy_ext,
4555 							    hw_mode_id, soc->num_radios,
4556 							    &soc->pdevs[pdev_index]);
4557 		if (ret) {
4558 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4559 				    soc->num_radios);
4560 			return ret;
4561 		}
4562 
4563 		soc->num_radios++;
4564 
4565 		/* For single_pdev_only targets,
4566 		 * save mac_phy capability in the same pdev
4567 		 */
4568 		if (soc->hw_params->single_pdev_only)
4569 			pdev_index = 0;
4570 		else
4571 			pdev_index = soc->num_radios;
4572 
4573 		/* TODO: mac_phy_cap prints */
4574 		phy_id_map >>= 1;
4575 	}
4576 
4577 	if (soc->hw_params->single_pdev_only) {
4578 		soc->num_radios = 1;
4579 		soc->pdevs[0].pdev_id = 0;
4580 	}
4581 
4582 	return 0;
4583 }
4584 
4585 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4586 					  u16 tag, u16 len,
4587 					  const void *ptr, void *data)
4588 {
4589 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4590 
4591 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4592 		return -EPROTO;
4593 
4594 	parse->n_dma_ring_caps++;
4595 	return 0;
4596 }
4597 
4598 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4599 					u32 num_cap)
4600 {
4601 	size_t sz;
4602 	void *ptr;
4603 
4604 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4605 	ptr = kzalloc(sz, GFP_ATOMIC);
4606 	if (!ptr)
4607 		return -ENOMEM;
4608 
4609 	ab->db_caps = ptr;
4610 	ab->num_db_cap = num_cap;
4611 
4612 	return 0;
4613 }
4614 
4615 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4616 {
4617 	kfree(ab->db_caps);
4618 	ab->db_caps = NULL;
4619 	ab->num_db_cap = 0;
4620 }
4621 
4622 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4623 				    u16 len, const void *ptr, void *data)
4624 {
4625 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4626 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4627 	struct ath12k_dbring_cap *dir_buff_caps;
4628 	int ret;
4629 	u32 i;
4630 
4631 	dma_caps_parse->n_dma_ring_caps = 0;
4632 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4633 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4634 				  ath12k_wmi_dma_ring_caps_parse,
4635 				  dma_caps_parse);
4636 	if (ret) {
4637 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4638 		return ret;
4639 	}
4640 
4641 	if (!dma_caps_parse->n_dma_ring_caps)
4642 		return 0;
4643 
4644 	if (ab->num_db_cap) {
4645 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4646 		return 0;
4647 	}
4648 
4649 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4650 	if (ret)
4651 		return ret;
4652 
4653 	dir_buff_caps = ab->db_caps;
4654 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4655 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4656 			ath12k_warn(ab, "Invalid module id %d\n",
4657 				    le32_to_cpu(dma_caps[i].module_id));
4658 			ret = -EINVAL;
4659 			goto free_dir_buff;
4660 		}
4661 
4662 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4663 		dir_buff_caps[i].pdev_id =
4664 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4665 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4666 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4667 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4668 	}
4669 
4670 	return 0;
4671 
4672 free_dir_buff:
4673 	ath12k_wmi_free_dbring_caps(ab);
4674 	return ret;
4675 }
4676 
4677 static void
4678 ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
4679 			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
4680 			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
4681 {
4682 	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
4683 	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
4684 	mac_phy_info->hw_freq_range.low_2ghz_freq =
4685 					__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
4686 	mac_phy_info->hw_freq_range.high_2ghz_freq =
4687 					__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
4688 	mac_phy_info->hw_freq_range.low_5ghz_freq =
4689 					__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
4690 	mac_phy_info->hw_freq_range.high_5ghz_freq =
4691 					__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
4692 }
4693 
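/* Record per-PHY frequency ranges for every hw mode. The mac_phy_caps
 * array from the TLV stream is laid out hw-mode major, so the cap
 * pointer advances once per PHY bit scanned in each mode's phy_id_map.
 */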
4694 static void
4695 ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
4696 				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
4697 {
4698 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
4699 	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
4700 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4701 	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
4702 	u32 hw_mode_id, phy_bit_map;
4703 	u8 hw_idx;
4704 
4705 	mac_phy_info = &svc_ext_info->mac_phy_info[0];
4706 	mac_phy_cap = svc_rdy_ext->mac_phy_caps;
4707 
4708 	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
4709 		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
4710 		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
4711 		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
4712 
4713 		while (phy_bit_map) {
4714 			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
4715 			mac_phy_info->hw_mode_config_type =
4716 					le32_get_bits(hw_mode_cap->hw_mode_config_type,
4717 						      WMI_HW_MODE_CAP_CFG_TYPE);
4718 			ath12k_dbg(ab, ATH12K_DBG_WMI,
4719 				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
4720 				   hw_idx, hw_mode_id,
4721 				   mac_phy_info->hw_mode_config_type,
4722 				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
4723 				   mac_phy_info->hw_freq_range.low_2ghz_freq,
4724 				   mac_phy_info->hw_freq_range.high_2ghz_freq,
4725 				   mac_phy_info->hw_freq_range.low_5ghz_freq,
4726 				   mac_phy_info->hw_freq_range.high_5ghz_freq);
4727 
4728 			mac_phy_cap++;
4729 			mac_phy_info++;
4730 
4731 			phy_bit_map >>= 1;
4732 		}
4733 	}
4734 }
4735 
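/* TLV walker for SERVICE_READY_EXT. The anonymous WMI_TAG_ARRAY_STRUCT
 * TLVs arrive in a fixed order, so the *_done flags decide whether an
 * array holds hw mode caps, MAC/PHY caps, ext HAL reg caps, chainmask
 * info or DMA ring caps.
 */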
4736 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4737 					u16 tag, u16 len,
4738 					const void *ptr, void *data)
4739 {
4740 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4741 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4742 	int ret;
4743 
4744 	switch (tag) {
4745 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4746 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4747 						&svc_rdy_ext->arg);
4748 		if (ret) {
4749 			ath12k_warn(ab, "unable to extract ext params\n");
4750 			return ret;
4751 		}
4752 		break;
4753 
4754 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4755 		svc_rdy_ext->hw_caps = ptr;
4756 		svc_rdy_ext->arg.num_hw_modes =
4757 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4758 		break;
4759 
4760 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4761 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4762 							    svc_rdy_ext);
4763 		if (ret)
4764 			return ret;
4765 		break;
4766 
4767 	case WMI_TAG_ARRAY_STRUCT:
4768 		if (!svc_rdy_ext->hw_mode_done) {
4769 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4770 			if (ret)
4771 				return ret;
4772 
4773 			svc_rdy_ext->hw_mode_done = true;
4774 		} else if (!svc_rdy_ext->mac_phy_done) {
4775 			svc_rdy_ext->n_mac_phy_caps = 0;
4776 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4777 						  ath12k_wmi_mac_phy_caps_parse,
4778 						  svc_rdy_ext);
4779 			if (ret) {
4780 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4781 				return ret;
4782 			}
4783 
4784 			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
4785 
4786 			svc_rdy_ext->mac_phy_done = true;
4787 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4788 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4789 			if (ret)
4790 				return ret;
4791 
4792 			svc_rdy_ext->ext_hal_reg_done = true;
4793 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4794 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4795 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4796 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4797 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4798 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4799 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4800 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4801 						       &svc_rdy_ext->dma_caps_parse);
4802 			if (ret)
4803 				return ret;
4804 
4805 			svc_rdy_ext->dma_ring_cap_done = true;
4806 		}
4807 		break;
4808 
4809 	default:
4810 		break;
4811 	}
4812 	return 0;
4813 }
4814 
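/* Handle SERVICE_READY_EXT. service_ready is only completed here when
 * the firmware does not support the EXT2 message; otherwise completion
 * is deferred until SERVICE_READY_EXT2 has been processed.
 */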
4815 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4816 					  struct sk_buff *skb)
4817 {
4818 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4819 	int ret;
4820 
4821 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4822 				  ath12k_wmi_svc_rdy_ext_parse,
4823 				  &svc_rdy_ext);
4824 	if (ret) {
4825 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4826 		goto err;
4827 	}
4828 
4829 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4830 		complete(&ab->wmi_ab.service_ready);
4831 
4832 	kfree(svc_rdy_ext.mac_phy_caps);
4833 	return 0;
4834 
4835 err:
4836 	kfree(svc_rdy_ext.mac_phy_caps);
4837 	ath12k_wmi_free_dbring_caps(ab);
4838 	return ret;
4839 }
4840 
4841 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4842 				      const void *ptr,
4843 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4844 {
4845 	const struct wmi_service_ready_ext2_event *ev = ptr;
4846 
4847 	if (!ev)
4848 		return -EINVAL;
4849 
4850 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4851 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4852 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4853 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4854 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4855 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4856 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4857 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4858 	return 0;
4859 }
4860 
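/* Convert the WMI EHT MAC/PHY capability words into the host band
 * capabilities. The 320 MHz-in-6 GHz PHY bit may already have been set by
 * an earlier pass, so it is saved before the PHY info words are
 * overwritten and OR-ed back in afterwards.
 */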
4861 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4862 				      const __le32 cap_mac_info[],
4863 				      const __le32 cap_phy_info[],
4864 				      const __le32 supp_mcs[],
4865 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4866 				       __le32 cap_info_internal)
4867 {
4868 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4869 	u32 support_320mhz;
4870 	u8 i;
4871 
4872 	if (band == NL80211_BAND_6GHZ)
4873 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4874 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4875 
4876 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4877 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4878 
4879 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4880 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4881 
4882 	if (band == NL80211_BAND_6GHZ)
4883 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4884 
4885 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4886 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4887 	if (band != NL80211_BAND_2GHZ) {
4888 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4889 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4890 	}
4891 
4892 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4893 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4894 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4895 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4896 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4897 
4898 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4899 }
4900 
4901 static int
4902 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4903 				      const struct ath12k_wmi_caps_ext_params *caps,
4904 				      struct ath12k_pdev *pdev)
4905 {
4906 	struct ath12k_band_cap *cap_band;
4907 	u32 bands, support_320mhz;
4908 	int i;
4909 
4910 	if (ab->hw_params->single_pdev_only) {
4911 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4912 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4913 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4914 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4915 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4916 			return 0;
4917 		}
4918 
4919 		for (i = 0; i < ab->fw_pdev_count; i++) {
4920 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4921 
4922 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4923 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4924 				bands = fw_pdev->supported_bands;
4925 				break;
4926 			}
4927 		}
4928 
4929 		if (i == ab->fw_pdev_count)
4930 			return -EINVAL;
4931 	} else {
4932 		bands = pdev->cap.supported_bands;
4933 	}
4934 
4935 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
4936 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4937 					  caps->eht_cap_mac_info_2ghz,
4938 					  caps->eht_cap_phy_info_2ghz,
4939 					  caps->eht_supp_mcs_ext_2ghz,
4940 					  &caps->eht_ppet_2ghz,
4941 					  caps->eht_cap_info_internal);
4942 	}
4943 
4944 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
4945 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4946 					  caps->eht_cap_mac_info_5ghz,
4947 					  caps->eht_cap_phy_info_5ghz,
4948 					  caps->eht_supp_mcs_ext_5ghz,
4949 					  &caps->eht_ppet_5ghz,
4950 					  caps->eht_cap_info_internal);
4951 
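		/* The ext caps TLV carries only 2 GHz and 5 GHz variants, so
		 * the 5 GHz EHT capability set is applied to 6 GHz as well.
		 */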
4952 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4953 					  caps->eht_cap_mac_info_5ghz,
4954 					  caps->eht_cap_phy_info_5ghz,
4955 					  caps->eht_supp_mcs_ext_5ghz,
4956 					  &caps->eht_ppet_5ghz,
4957 					  caps->eht_cap_info_internal);
4958 	}
4959 
4960 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4961 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4962 
4963 	return 0;
4964 }
4965 
4966 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4967 					   u16 len, const void *ptr,
4968 					   void *data)
4969 {
4970 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4971 	int i = 0, ret;
4972 
4973 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4974 		return -EPROTO;
4975 
4976 	if (ab->hw_params->single_pdev_only) {
4977 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4978 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4979 			return 0;
4980 	} else {
4981 		for (i = 0; i < ab->num_radios; i++) {
4982 			if (ab->pdevs[i].pdev_id ==
4983 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4984 				break;
4985 		}
4986 
4987 		if (i == ab->num_radios)
4988 			return -EINVAL;
4989 	}
4990 
4991 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4992 	if (ret) {
4993 		ath12k_warn(ab,
4994 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4994 			    ab->pdevs[i].pdev_id, ret);
4996 		return ret;
4997 	}
4998 
4999 	return 0;
5000 }
5001 
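/* Record the firmware-reported frequency range of this mac for the given
 * hw mode, clamped to the host limits. A zero upper bound from firmware
 * means "no limit" and is replaced with the band maximum (e.g. a reported
 * high_5ghz_freq of 0 becomes ATH12K_MAX_6GHZ_FREQ).
 */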
5002 static void
5003 ath12k_wmi_update_freq_info(struct ath12k_base *ab,
5004 			    struct ath12k_svc_ext_mac_phy_info *mac_cap,
5005 			    enum ath12k_hw_mode mode,
5006 			    u32 phy_id)
5007 {
5008 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5009 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5010 
5011 	mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
5012 
5013 	if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
5014 		mac_range->low_2ghz_freq = max_t(u32,
5015 						 mac_cap->hw_freq_range.low_2ghz_freq,
5016 						 ATH12K_MIN_2GHZ_FREQ);
5017 		mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
5018 					    min_t(u32,
5019 						  mac_cap->hw_freq_range.high_2ghz_freq,
5020 						  ATH12K_MAX_2GHZ_FREQ) :
5021 					    ATH12K_MAX_2GHZ_FREQ;
5022 	}
5023 
5024 	if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
5025 		mac_range->low_5ghz_freq = max_t(u32,
5026 						 mac_cap->hw_freq_range.low_5ghz_freq,
5027 						 ATH12K_MIN_5GHZ_FREQ);
5028 		mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
5029 					    min_t(u32,
5030 						  mac_cap->hw_freq_range.high_5ghz_freq,
5031 						  ATH12K_MAX_6GHZ_FREQ) :
5032 					    ATH12K_MAX_6GHZ_FREQ;
5033 	}
5034 }
5035 
5036 static bool
5037 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
5038 				 enum ath12k_hw_mode hwmode)
5039 {
5040 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5041 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5042 	u8 phy_id;
5043 
5044 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5045 		mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
5046 		/* modify SBS/DBS range only when both phys for DBS are filled */
5047 		if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
5048 			return false;
5049 	}
5050 
5051 	return true;
5052 }
5053 
5054 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
5055 {
5056 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5057 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5058 	u8 phy_id;
5059 
5060 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
5061 	/* Reset 5 GHz range for shared mac for DBS */
5062 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5063 		if (mac_range[phy_id].low_2ghz_freq &&
5064 		    mac_range[phy_id].low_5ghz_freq) {
5065 			mac_range[phy_id].low_5ghz_freq = 0;
5066 			mac_range[phy_id].high_5ghz_freq = 0;
5067 		}
5068 	}
5069 }
5070 
5071 static u32
5072 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5073 {
5074 	u32 highest_freq = 0;
5075 	u8 phy_id;
5076 
5077 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5078 		if (range[phy_id].high_5ghz_freq > highest_freq)
5079 			highest_freq = range[phy_id].high_5ghz_freq;
5080 	}
5081 
5082 	return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
5083 }
5084 
5085 static u32
5086 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5087 {
5088 	u32 lowest_freq = 0;
5089 	u8 phy_id;
5090 
5091 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5092 		if (range[phy_id].low_5ghz_freq &&
5093 		    (!lowest_freq || range[phy_id].low_5ghz_freq < lowest_freq))
5094 			lowest_freq = range[phy_id].low_5ghz_freq;
5095 	}
5096 
5097 	return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
5098 }
5099 
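/* Build the SBS "upper share" ranges from the plain SBS ranges: the mac
 * that also owns 2 GHz gets the 5 GHz spectrum above the cutoff and the
 * other mac gets everything below it, with a 10 MHz guard between the two.
 * Illustrative example, assuming a 5330 MHz cutoff: the shared mac covers
 * [5340, highest] and the non-shared mac covers [lowest, 5330].
 */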
5100 static void
5101 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
5102 				     u16 sbs_range_sep,
5103 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5104 {
5105 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5106 	struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
5107 	u8 phy_id;
5108 
5109 	upper_sbs_freq_range =
5110 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
5111 
5112 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5113 		upper_sbs_freq_range[phy_id].low_2ghz_freq =
5114 						ref_freq[phy_id].low_2ghz_freq;
5115 		upper_sbs_freq_range[phy_id].high_2ghz_freq =
5116 						ref_freq[phy_id].high_2ghz_freq;
5117 
5118 		/* update for shared mac */
5119 		if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
5120 			upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5121 			upper_sbs_freq_range[phy_id].high_5ghz_freq =
5122 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5123 		} else {
5124 			upper_sbs_freq_range[phy_id].low_5ghz_freq =
5125 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5126 			upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5127 		}
5128 	}
5129 }
5130 
5131 static void
5132 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
5133 				     u16 sbs_range_sep,
5134 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5135 {
5136 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5137 	struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
5138 	u8 phy_id;
5139 
5140 	lower_sbs_freq_range =
5141 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
5142 
5143 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5144 		lower_sbs_freq_range[phy_id].low_2ghz_freq =
5145 						ref_freq[phy_id].low_2ghz_freq;
5146 		lower_sbs_freq_range[phy_id].high_2ghz_freq =
5147 						ref_freq[phy_id].high_2ghz_freq;
5148 
5149 		/* update for shared mac */
5150 		if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
5151 			lower_sbs_freq_range[phy_id].low_5ghz_freq =
5152 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5153 			lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5154 		} else {
5155 			lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5156 			lower_sbs_freq_range[phy_id].high_5ghz_freq =
5157 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5158 		}
5159 	}
5160 }
5161 
5162 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
5163 {
5164 	static const char * const mode_str[] = {
5165 		[ATH12K_HW_MODE_SMM] = "SMM",
5166 		[ATH12K_HW_MODE_DBS] = "DBS",
5167 		[ATH12K_HW_MODE_SBS] = "SBS",
5168 		[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
5169 		[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
5170 	};
5171 
5172 	if (hw_mode >= ARRAY_SIZE(mode_str))
5173 		return "Unknown";
5174 
5175 	return mode_str[hw_mode];
5176 }
5177 
5178 static void
5179 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
5180 				   struct ath12k_hw_mode_freq_range_arg *freq_range,
5181 				   enum ath12k_hw_mode hw_mode)
5182 {
5183 	u8 i;
5184 
5185 	for (i = 0; i < MAX_RADIOS; i++)
5186 		if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
5187 			ath12k_dbg(ab, ATH12K_DBG_WMI,
5188 				   "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5189 				   ath12k_wmi_hw_mode_to_str(hw_mode),
5190 				   hw_mode, i,
5191 				   freq_range[i].low_2ghz_freq,
5192 				   freq_range[i].high_2ghz_freq,
5193 				   freq_range[i].low_5ghz_freq,
5194 				   freq_range[i].high_5ghz_freq);
5195 }
5196 
5197 static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
5198 {
5199 	struct ath12k_hw_mode_freq_range_arg *freq_range;
5200 	u8 i;
5201 
5202 	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
5203 		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
5204 		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
5205 	}
5206 }
5207 
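/* When firmware does not provide an SBS cutoff frequency, derive the
 * non-shared mac's 5 GHz range from the shared mac (the one that also
 * covers 2.4 GHz): trim the non-shared range so that it does not overlap
 * the shared range, keeping a 10 MHz guard between the two.
 */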
5208 static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
5209 {
5210 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5211 	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
5212 	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
5213 	u8 shared_phy_id;
5214 
5215 	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
5216 
5217 	/* If the SBS mac range has both 2.4 and 5 GHz ranges, i.e. the
5218 	 * shared phy_id, keep the range as it is in SBS.
5219 	 */
5220 	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
5221 		return 0;
5222 
5223 	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
5224 		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4 GHz");
5225 		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
5226 		return -EINVAL;
5227 	}
5228 
5229 	non_shared_range = sbs_mac_range;
5230 	/* if SBS mac range has only 5 GHz then it's the non-shared phy, so
5231 	 * modify the range as per the shared mac.
5232 	 */
5233 	shared_phy_id = phy_id ? 0 : 1;
5234 	shared_mac_range =
5235 		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
5236 
5237 	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
5238 		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
5239 		/* If the shared mac lower 5 GHz frequency is greater than
5240 		 * non-shared mac lower 5 GHz frequency then the shared mac has
5241 		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
5242 		 * freq should be less than the shared mac's low 5 GHz freq.
5243 		 */
5244 		if (non_shared_range->high_5ghz_freq >=
5245 		    shared_mac_range->low_5ghz_freq)
5246 			non_shared_range->high_5ghz_freq =
5247 				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
5248 				      non_shared_range->low_5ghz_freq);
5249 	} else if (shared_mac_range->high_5ghz_freq <
5250 		   non_shared_range->high_5ghz_freq) {
5251 		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
5252 		/* If the shared mac high 5 GHz frequency is less than
5253 		 * non-shared mac high 5 GHz frequency then the shared mac has
5254 		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
5255 		 * freq should be greater than the shared mac's high 5 GHz freq.
5256 		 */
5257 		if (shared_mac_range->high_5ghz_freq >=
5258 		    non_shared_range->low_5ghz_freq)
5259 			non_shared_range->low_5ghz_freq =
5260 				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
5261 				      non_shared_range->high_5ghz_freq);
5262 	} else {
5263 		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
5264 		return -EINVAL;
5265 	}
5266 
5267 	return 0;
5268 }
5269 
5270 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
5271 {
5272 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5273 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5274 	u16 sbs_range_sep;
5275 	u8 phy_id;
5276 	int ret;
5277 
5278 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
5279 
5280 	/* If sbs_lower_band_end_freq has a value, then the frequency range
5281 	 * will be split using that value.
5282 	 */
5283 	sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
5284 	if (sbs_range_sep) {
5285 		ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
5286 						     mac_range);
5287 		ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
5288 						     mac_range);
5289 		/* Hardware specifies the range boundary with sbs_range_sep
5290 		 * (i.e. the boundary between 5 GHz high and 5 GHz low), so
5291 		 * reset the original range to make sure it will not get used.
5292 		 */
5293 		memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5294 		return;
5295 	}
5296 
5297 	/* If sbs_lower_band_end_freq is not set, firmware will send one shared
5298 	 * mac range and one non-shared mac range, so update the freq accordingly.
5299 	 */
5300 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5301 		ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
5302 		if (ret) {
5303 			memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5304 			break;
5305 		}
5306 	}
5307 }
5308 
5309 static void
5310 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
5311 				enum wmi_host_hw_mode_config_type hw_config_type,
5312 				u32 phy_id,
5313 				struct ath12k_svc_ext_mac_phy_info *mac_cap)
5314 {
5315 	if (phy_id >= MAX_RADIOS) {
5316 		ath12k_err(ab, "more than two MACs not supported: phy %d", phy_id);
5317 		return;
5318 	}
5319 
5320 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5321 		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5322 		   hw_config_type, phy_id, mac_cap->supported_bands,
5323 		   ab->wmi_ab.sbs_lower_band_end_freq,
5324 		   mac_cap->hw_freq_range.low_2ghz_freq,
5325 		   mac_cap->hw_freq_range.high_2ghz_freq,
5326 		   mac_cap->hw_freq_range.low_5ghz_freq,
5327 		   mac_cap->hw_freq_range.high_5ghz_freq);
5328 
5329 	switch (hw_config_type) {
5330 	case WMI_HOST_HW_MODE_SINGLE:
5331 		if (phy_id) {
5332 			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
5333 			break;
5334 		}
5335 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
5336 		break;
5337 
5338 	case WMI_HOST_HW_MODE_DBS:
5339 		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5340 			ath12k_wmi_update_freq_info(ab, mac_cap,
5341 						    ATH12K_HW_MODE_DBS, phy_id);
5342 		break;
5343 	case WMI_HOST_HW_MODE_DBS_SBS:
5344 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
5345 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
5346 		if (ab->wmi_ab.sbs_lower_band_end_freq ||
5347 		    mac_cap->hw_freq_range.low_5ghz_freq ||
5348 		    mac_cap->hw_freq_range.low_2ghz_freq)
5349 			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
5350 						    phy_id);
5351 
5352 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5353 			ath12k_wmi_update_dbs_freq_info(ab);
5354 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5355 			ath12k_wmi_update_sbs_freq_info(ab);
5356 		break;
5357 	case WMI_HOST_HW_MODE_SBS:
5358 	case WMI_HOST_HW_MODE_SBS_PASSIVE:
5359 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
5360 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5361 			ath12k_wmi_update_sbs_freq_info(ab);
5362 
5363 		break;
5364 	default:
5365 		break;
5366 	}
5367 }
5368 
5369 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
5370 {
5371 	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
5372 	    (ab->wmi_ab.sbs_lower_band_end_freq &&
5373 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
5374 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
5375 		return true;
5376 
5377 	return false;
5378 }
5379 
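/* Walk the per-hw-mode mac_phy_info entries saved from the service ready
 * ext event and build the per-mode frequency range table. Single-mac modes
 * consume one entry; DBS/SBS modes consume two (MAC0 and MAC1).
 */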
5380 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
5381 {
5382 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
5383 	struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
5384 	enum wmi_host_hw_mode_config_type hw_config_type;
5385 	struct ath12k_svc_ext_mac_phy_info *tmp;
5386 	bool dbs_mode = false, sbs_mode = false;
5387 	u32 i, j = 0;
5388 
5389 	if (!svc_ext_info->num_hw_modes) {
5390 		ath12k_err(ab, "invalid number of hw modes");
5391 		return -EINVAL;
5392 	}
5393 
5394 	ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
5395 		   svc_ext_info->num_hw_modes);
5396 
5397 	memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
5398 
5399 	for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
5400 		if (j >= ATH12K_MAX_MAC_PHY_CAP)
5401 			return -EINVAL;
5402 
5403 		/* Update for MAC0 */
5404 		tmp = &svc_ext_info->mac_phy_info[j++];
5405 		hw_config_type = tmp->hw_mode_config_type;
5406 		ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
5407 
5408 		/* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
5409 		if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5410 		    hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5411 		    hw_config_type == WMI_HOST_HW_MODE_SBS ||
5412 		    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
5413 			if (j >= ATH12K_MAX_MAC_PHY_CAP)
5414 				return -EINVAL;
5415 			/* Update for MAC1 */
5416 			tmp = &svc_ext_info->mac_phy_info[j++];
5417 			ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
5418 							tmp->phy_id, tmp);
5419 
5420 			if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5421 			    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
5422 				dbs_mode = true;
5423 
5424 			if (ath12k_wmi_sbs_range_present(ab) &&
5425 			    (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5426 			     hw_config_type == WMI_HOST_HW_MODE_SBS ||
5427 			     hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
5428 				sbs_mode = true;
5429 		}
5430 	}
5431 
5432 	info->support_dbs = dbs_mode;
5433 	info->support_sbs = sbs_mode;
5434 
5435 	ath12k_wmi_dump_freq_range(ab);
5436 
5437 	return 0;
5438 }
5439 
5440 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
5441 					 u16 tag, u16 len,
5442 					 const void *ptr, void *data)
5443 {
5444 	const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
5445 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
5446 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
5447 	int ret;
5448 
5449 	switch (tag) {
5450 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
5451 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
5452 						 &parse->arg);
5453 		if (ret) {
5454 			ath12k_warn(ab,
5455 				    "failed to extract wmi service ready ext2 parameters: %d\n",
5456 				    ret);
5457 			return ret;
5458 		}
5459 		break;
5460 
5461 	case WMI_TAG_ARRAY_STRUCT:
5462 		if (!parse->dma_ring_cap_done) {
5463 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
5464 						       &parse->dma_caps_parse);
5465 			if (ret)
5466 				return ret;
5467 
5468 			parse->dma_ring_cap_done = true;
5469 		} else if (!parse->spectral_bin_scaling_done) {
5470 			/* TODO: This is a place-holder as WMI tag for
5471 			 * spectral scaling is before
5472 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
5473 			 */
5474 			parse->spectral_bin_scaling_done = true;
5475 		} else if (!parse->mac_phy_caps_ext_done) {
5476 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
5477 						  ath12k_wmi_tlv_mac_phy_caps_ext,
5478 						  parse);
5479 			if (ret) {
5480 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
5481 					    ret);
5482 				return ret;
5483 			}
5484 
5485 			parse->mac_phy_caps_ext_done = true;
5486 		} else if (!parse->hal_reg_caps_ext2_done) {
5487 			parse->hal_reg_caps_ext2_done = true;
5488 		} else if (!parse->scan_radio_caps_ext2_done) {
5489 			parse->scan_radio_caps_ext2_done = true;
5490 		} else if (!parse->twt_caps_done) {
5491 			parse->twt_caps_done = true;
5492 		} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
5493 			parse->htt_msdu_idx_to_qtype_map_done = true;
5494 		} else if (!parse->dbs_or_sbs_cap_ext_done) {
5495 			dbs_or_sbs_caps = ptr;
5496 			ab->wmi_ab.sbs_lower_band_end_freq =
5497 				__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
5498 
5499 			ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
5500 				   ab->wmi_ab.sbs_lower_band_end_freq);
5501 
5502 			ret = ath12k_wmi_update_hw_mode_list(ab);
5503 			if (ret) {
5504 				ath12k_warn(ab, "failed to update hw mode list: %d\n",
5505 					    ret);
5506 				return ret;
5507 			}
5508 
5509 			parse->dbs_or_sbs_cap_ext_done = true;
5510 		}
5511 
5512 		break;
5513 	default:
5514 		break;
5515 	}
5516 
5517 	return 0;
5518 }
5519 
5520 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
5521 					   struct sk_buff *skb)
5522 {
5523 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
5524 	int ret;
5525 
5526 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5527 				  ath12k_wmi_svc_rdy_ext2_parse,
5528 				  &svc_rdy_ext2);
5529 	if (ret) {
5530 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
5531 		goto err;
5532 	}
5533 
5534 	complete(&ab->wmi_ab.service_ready);
5535 
5536 	return 0;
5537 
5538 err:
5539 	ath12k_wmi_free_dbring_caps(ab);
5540 	return ret;
5541 }
5542 
5543 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5544 					   struct wmi_vdev_start_resp_event *vdev_rsp)
5545 {
5546 	const void **tb;
5547 	const struct wmi_vdev_start_resp_event *ev;
5548 	int ret;
5549 
5550 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5551 	if (IS_ERR(tb)) {
5552 		ret = PTR_ERR(tb);
5553 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5554 		return ret;
5555 	}
5556 
5557 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
5558 	if (!ev) {
5559 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
5560 		kfree(tb);
5561 		return -EPROTO;
5562 	}
5563 
5564 	*vdev_rsp = *ev;
5565 
5566 	kfree(tb);
5567 	return 0;
5568 }
5569 
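/* Unpack an array of WMI ext reg rules into host reg rules. Each WMI rule
 * packs several fields per 32-bit word: freq_info carries the start/end
 * frequency, bw_pwr_info the max bandwidth, regulatory power and antenna
 * gain, and psd_power_info the PSD flag and EIRP.
 */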
5570 static struct ath12k_reg_rule
5571 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5572 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5573 {
5574 	struct ath12k_reg_rule *reg_rule_ptr;
5575 	u32 count;
5576 
5577 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
5578 			       GFP_ATOMIC);
5579 
5580 	if (!reg_rule_ptr)
5581 		return NULL;
5582 
5583 	for (count = 0; count < num_reg_rules; count++) {
5584 		reg_rule_ptr[count].start_freq =
5585 			le32_get_bits(wmi_reg_rule[count].freq_info,
5586 				      REG_RULE_START_FREQ);
5587 		reg_rule_ptr[count].end_freq =
5588 			le32_get_bits(wmi_reg_rule[count].freq_info,
5589 				      REG_RULE_END_FREQ);
5590 		reg_rule_ptr[count].max_bw =
5591 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5592 				      REG_RULE_MAX_BW);
5593 		reg_rule_ptr[count].reg_power =
5594 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5595 				      REG_RULE_REG_PWR);
5596 		reg_rule_ptr[count].ant_gain =
5597 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5598 				      REG_RULE_ANT_GAIN);
5599 		reg_rule_ptr[count].flags =
5600 			le32_get_bits(wmi_reg_rule[count].flag_info,
5601 				      REG_RULE_FLAGS);
5602 		reg_rule_ptr[count].psd_flag =
5603 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5604 				      REG_RULE_PSD_INFO);
5605 		reg_rule_ptr[count].psd_eirp =
5606 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5607 				      REG_RULE_PSD_EIRP);
5608 	}
5609 
5610 	return reg_rule_ptr;
5611 }
5612 
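/* Count rules in the 5 GHz rule list whose start frequency already lies in
 * the 6 GHz range; see the comment at the call site for why such
 * duplicates are skipped.
 */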
5613 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5614 					    u32 num_reg_rules)
5615 {
5616 	u8 num_invalid_5ghz_rules = 0;
5617 	u32 count, start_freq;
5618 
5619 	for (count = 0; count < num_reg_rules; count++) {
5620 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5621 
5622 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5623 			num_invalid_5ghz_rules++;
5624 	}
5625 
5626 	return num_invalid_5ghz_rules;
5627 }
5628 
5629 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5630 						   struct sk_buff *skb,
5631 						   struct ath12k_reg_info *reg_info)
5632 {
5633 	const void **tb;
5634 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5635 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5636 	u32 num_2g_reg_rules, num_5g_reg_rules;
5637 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5638 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5639 	u8 num_invalid_5ghz_ext_rules;
5640 	u32 total_reg_rules = 0;
5641 	int ret, i, j;
5642 
5643 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5644 
5645 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5646 	if (IS_ERR(tb)) {
5647 		ret = PTR_ERR(tb);
5648 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5649 		return ret;
5650 	}
5651 
5652 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5653 	if (!ev) {
5654 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5655 		kfree(tb);
5656 		return -EPROTO;
5657 	}
5658 
5659 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5660 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5661 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5662 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5663 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5664 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5665 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5666 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5667 
5668 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5669 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5670 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5671 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5672 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5673 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5674 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5675 	}
5676 
5677 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5678 	total_reg_rules += num_2g_reg_rules;
5679 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5680 	total_reg_rules += num_5g_reg_rules;
5681 
5682 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
5683 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
5684 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
5685 		kfree(tb);
5686 		return -EINVAL;
5687 	}
5688 
5689 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5690 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
5691 
5692 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
5693 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
5694 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
5695 			kfree(tb);
5696 			return -EINVAL;
5697 		}
5698 
5699 		total_reg_rules += num_6g_reg_rules_ap[i];
5700 	}
5701 
5702 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5703 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5704 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5705 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5706 
5707 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5708 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5709 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5710 
5711 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5712 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5713 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5714 
5715 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
5716 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
5717 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
5718 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client type %d\n",
5719 				    i);
5720 			kfree(tb);
5721 			return -EINVAL;
5722 		}
5723 	}
5724 
5725 	if (!total_reg_rules) {
5726 		ath12k_warn(ab, "No reg rules available\n");
5727 		kfree(tb);
5728 		return -EINVAL;
5729 	}
5730 
5731 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5732 
5733 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5734 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5735 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5736 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5737 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5738 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5739 
5740 	switch (le32_to_cpu(ev->status_code)) {
5741 	case WMI_REG_SET_CC_STATUS_PASS:
5742 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5743 		break;
5744 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5745 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5746 		break;
5747 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5748 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5749 		break;
5750 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5751 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5752 		break;
5753 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5754 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5755 		break;
5756 	case WMI_REG_SET_CC_STATUS_FAIL:
5757 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5758 		break;
5759 	}
5760 
5761 	reg_info->is_ext_reg_event = true;
5762 
5763 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5764 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5765 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5766 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5767 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5768 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5769 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5770 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5771 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5772 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5773 
5774 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5775 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5776 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5777 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5778 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5779 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5780 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5781 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5782 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5783 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5784 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5785 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5786 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5787 	}
5788 
5789 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5790 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5791 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5792 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5793 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5794 		   reg_info->phybitmap);
5795 
5796 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5797 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5798 		   num_2g_reg_rules, num_5g_reg_rules);
5799 
5800 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5801 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5802 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5803 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5804 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5805 
5806 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5807 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5808 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5809 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5810 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5811 
5812 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5813 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5814 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5815 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5816 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5817 
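	/* The reg rules follow the fixed event struct and its TLV header,
	 * laid out in order: 2 GHz, 5 GHz, 6 GHz AP (LPI/SP/VLP), then
	 * 6 GHz client rules per AP type and client type.
	 */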
5818 	ext_wmi_reg_rule =
5819 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5820 			+ sizeof(*ev)
5821 			+ sizeof(struct wmi_tlv));
5822 
5823 	if (num_2g_reg_rules) {
5824 		reg_info->reg_rules_2g_ptr =
5825 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5826 						      ext_wmi_reg_rule);
5827 
5828 		if (!reg_info->reg_rules_2g_ptr) {
5829 			kfree(tb);
5830 			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
5831 			return -ENOMEM;
5832 		}
5833 	}
5834 
5835 	ext_wmi_reg_rule += num_2g_reg_rules;
5836 
5837 	/* For a few countries, firmware might include 6 GHz reg rules in the
5838 	 * 5 GHz rule list in addition to the separate 6 GHz rules. Having the
5839 	 * same 6 GHz reg rule in both the 5 GHz and 6 GHz rule lists makes
5840 	 * the intersect check true, and the same rules will be shown multiple
5841 	 * times in the iw command output. Hence, avoid parsing 6 GHz rules
5842 	 * from the 5 GHz reg rule list.
5843 	 */
5844 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5845 								       num_5g_reg_rules);
5846 
5847 	if (num_invalid_5ghz_ext_rules) {
5848 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5849 			   "CC: %s, %d 5 GHz reg rules from fw, %d invalid 5 GHz rules",
5850 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5851 			   num_invalid_5ghz_ext_rules);
5852 
5853 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
5854 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5855 	}
5856 
5857 	if (num_5g_reg_rules) {
5858 		reg_info->reg_rules_5g_ptr =
5859 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5860 						      ext_wmi_reg_rule);
5861 
5862 		if (!reg_info->reg_rules_5g_ptr) {
5863 			kfree(tb);
5864 			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
5865 			return -ENOMEM;
5866 		}
5867 	}
5868 
5869 	/* We have adjusted the number of 5 GHz reg rules above, but the skipped
5870 	 * rules still occupy entries in ext_wmi_reg_rule and must be stepped over.
5871 	 *
5872 	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
5873 	 */
5874 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5875 
5876 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5877 		reg_info->reg_rules_6g_ap_ptr[i] =
5878 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5879 						      ext_wmi_reg_rule);
5880 
5881 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5882 			kfree(tb);
5883 			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
5884 			return -ENOMEM;
5885 		}
5886 
5887 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5888 	}
5889 
5890 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5891 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5892 			reg_info->reg_rules_6g_client_ptr[j][i] =
5893 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5894 							      ext_wmi_reg_rule);
5895 
5896 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5897 				kfree(tb);
5898 				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
5899 				return -ENOMEM;
5900 			}
5901 
5902 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5903 		}
5904 	}
5905 
5906 	reg_info->client_type = le32_to_cpu(ev->client_type);
5907 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5908 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5909 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5910 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5911 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5912 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5913 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5914 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5915 
5916 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5917 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5918 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5919 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5920 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5921 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5922 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5923 	}
5924 
5925 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5926 
5927 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5928 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5929 
5930 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5931 
5932 	kfree(tb);
5933 	return 0;
5934 }
5935 
5936 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5937 					struct wmi_peer_delete_resp_event *peer_del_resp)
5938 {
5939 	const void **tb;
5940 	const struct wmi_peer_delete_resp_event *ev;
5941 	int ret;
5942 
5943 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5944 	if (IS_ERR(tb)) {
5945 		ret = PTR_ERR(tb);
5946 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5947 		return ret;
5948 	}
5949 
5950 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5951 	if (!ev) {
5952 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5953 		kfree(tb);
5954 		return -EPROTO;
5955 	}
5956 
5957 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5958 
5959 	peer_del_resp->vdev_id = ev->vdev_id;
5960 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5961 			ev->peer_macaddr.addr);
5962 
5963 	kfree(tb);
5964 	return 0;
5965 }
5966 
5967 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5968 					struct sk_buff *skb,
5969 					u32 *vdev_id)
5970 {
5971 	const void **tb;
5972 	const struct wmi_vdev_delete_resp_event *ev;
5973 	int ret;
5974 
5975 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5976 	if (IS_ERR(tb)) {
5977 		ret = PTR_ERR(tb);
5978 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5979 		return ret;
5980 	}
5981 
5982 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5983 	if (!ev) {
5984 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5985 		kfree(tb);
5986 		return -EPROTO;
5987 	}
5988 
5989 	*vdev_id = le32_to_cpu(ev->vdev_id);
5990 
5991 	kfree(tb);
5992 	return 0;
5993 }
5994 
5995 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5996 					struct sk_buff *skb,
5997 					u32 *vdev_id, u32 *tx_status)
5998 {
5999 	const void **tb;
6000 	const struct wmi_bcn_tx_status_event *ev;
6001 	int ret;
6002 
6003 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6004 	if (IS_ERR(tb)) {
6005 		ret = PTR_ERR(tb);
6006 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6007 		return ret;
6008 	}
6009 
6010 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
6011 	if (!ev) {
6012 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
6013 		kfree(tb);
6014 		return -EPROTO;
6015 	}
6016 
6017 	*vdev_id = le32_to_cpu(ev->vdev_id);
6018 	*tx_status = le32_to_cpu(ev->tx_status);
6019 
6020 	kfree(tb);
6021 	return 0;
6022 }
6023 
6024 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
6025 					      u32 *vdev_id)
6026 {
6027 	const void **tb;
6028 	const struct wmi_vdev_stopped_event *ev;
6029 	int ret;
6030 
6031 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6032 	if (IS_ERR(tb)) {
6033 		ret = PTR_ERR(tb);
6034 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6035 		return ret;
6036 	}
6037 
6038 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
6039 	if (!ev) {
6040 		ath12k_warn(ab, "failed to fetch vdev stop ev");
6041 		kfree(tb);
6042 		return -EPROTO;
6043 	}
6044 
6045 	*vdev_id = le32_to_cpu(ev->vdev_id);
6046 
6047 	kfree(tb);
6048 	return 0;
6049 }
6050 
6051 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
6052 					u16 tag, u16 len,
6053 					const void *ptr, void *data)
6054 {
6055 	struct wmi_tlv_mgmt_rx_parse *parse = data;
6056 
6057 	switch (tag) {
6058 	case WMI_TAG_MGMT_RX_HDR:
6059 		parse->fixed = ptr;
6060 		break;
6061 	case WMI_TAG_ARRAY_BYTE:
6062 		if (!parse->frame_buf_done) {
6063 			parse->frame_buf = ptr;
6064 			parse->frame_buf_done = true;
6065 		}
6066 		break;
6067 	}
6068 	return 0;
6069 }
6070 
6071 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
6072 					  struct sk_buff *skb,
6073 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
6074 {
6075 	struct wmi_tlv_mgmt_rx_parse parse = { };
6076 	const struct ath12k_wmi_mgmt_rx_params *ev;
6077 	const u8 *frame;
6078 	int i, ret;
6079 
6080 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6081 				  ath12k_wmi_tlv_mgmt_rx_parse,
6082 				  &parse);
6083 	if (ret) {
6084 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
6085 		return ret;
6086 	}
6087 
6088 	ev = parse.fixed;
6089 	frame = parse.frame_buf;
6090 
6091 	if (!ev || !frame) {
6092 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
6093 		return -EPROTO;
6094 	}
6095 
6096 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
6097 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
6098 	hdr->channel = le32_to_cpu(ev->channel);
6099 	hdr->snr = le32_to_cpu(ev->snr);
6100 	hdr->rate = le32_to_cpu(ev->rate);
6101 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
6102 	hdr->buf_len = le32_to_cpu(ev->buf_len);
6103 	hdr->status = le32_to_cpu(ev->status);
6104 	hdr->flags = le32_to_cpu(ev->flags);
6105 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
6106 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
6107 
6108 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
6109 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
6110 
6111 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
6112 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
6113 		return -EPROTO;
6114 	}
6115 
6116 	/* shift the sk_buff to point to `frame` */
6117 	skb_trim(skb, 0);
6118 	skb_put(skb, frame - skb->data);
6119 	skb_pull(skb, frame - skb->data);
6120 	skb_put(skb, hdr->buf_len);
6121 
6122 	return 0;
6123 }
6124 
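/* Complete a mgmt frame tx: look the msdu up by desc_id in the txmgmt IDR,
 * unmap its DMA buffer, translate the WMI status into mac80211 tx flags
 * and hand the frame back via ieee80211_tx_status_irqsafe().
 */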
6125 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
6126 				    u32 status, u32 ack_rssi)
6127 {
6128 	struct sk_buff *msdu;
6129 	struct ieee80211_tx_info *info;
6130 	struct ath12k_skb_cb *skb_cb;
6131 	int num_mgmt;
6132 
6133 	spin_lock_bh(&ar->txmgmt_idr_lock);
6134 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
6135 
6136 	if (!msdu) {
6137 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
6138 			    desc_id);
6139 		spin_unlock_bh(&ar->txmgmt_idr_lock);
6140 		return -ENOENT;
6141 	}
6142 
6143 	idr_remove(&ar->txmgmt_idr, desc_id);
6144 	spin_unlock_bh(&ar->txmgmt_idr_lock);
6145 
6146 	skb_cb = ATH12K_SKB_CB(msdu);
6147 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
6148 
6149 	info = IEEE80211_SKB_CB(msdu);
6150 	memset(&info->status, 0, sizeof(info->status));
6151 
6152 	/* skip tx rate update from ieee80211_status */
6153 	info->status.rates[0].idx = -1;
6154 
6155 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
6156 		info->flags |= IEEE80211_TX_STAT_ACK;
6157 		info->status.ack_signal = ack_rssi;
6158 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
6159 	}
6160 
6161 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
6162 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
6163 
6164 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
6165 
6166 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
6167 
6168 	/* WARN when we receive this event without any pending mgmt tx */
6169 	if (num_mgmt < 0)
6170 		WARN_ON_ONCE(1);
6171 
6172 	if (!num_mgmt)
6173 		wake_up(&ar->txmgmt_empty_waitq);
6174 
6175 	return 0;
6176 }
6177 
6178 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
6179 					       struct sk_buff *skb,
6180 					       struct wmi_mgmt_tx_compl_event *param)
6181 {
6182 	const void **tb;
6183 	const struct wmi_mgmt_tx_compl_event *ev;
6184 	int ret;
6185 
6186 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6187 	if (IS_ERR(tb)) {
6188 		ret = PTR_ERR(tb);
6189 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6190 		return ret;
6191 	}
6192 
6193 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
6194 	if (!ev) {
6195 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
6196 		kfree(tb);
6197 		return -EPROTO;
6198 	}
6199 
6200 	param->pdev_id = ev->pdev_id;
6201 	param->desc_id = ev->desc_id;
6202 	param->status = ev->status;
6203 	param->ppdu_id = ev->ppdu_id;
6204 	param->ack_rssi = ev->ack_rssi;
6205 
6206 	kfree(tb);
6207 	return 0;
6208 }
6209 
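/* The scan event handlers below run with ar->data_lock held and validate
 * each firmware-reported event against the host scan state machine,
 * warning on transitions that should not occur.
 */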
6210 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
6211 {
6212 	lockdep_assert_held(&ar->data_lock);
6213 
6214 	switch (ar->scan.state) {
6215 	case ATH12K_SCAN_IDLE:
6216 	case ATH12K_SCAN_RUNNING:
6217 	case ATH12K_SCAN_ABORTING:
6218 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
6219 			    ath12k_scan_state_str(ar->scan.state),
6220 			    ar->scan.state);
6221 		break;
6222 	case ATH12K_SCAN_STARTING:
6223 		ar->scan.state = ATH12K_SCAN_RUNNING;
6224 
6225 		if (ar->scan.is_roc)
6226 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
6227 
6228 		complete(&ar->scan.started);
6229 		break;
6230 	}
6231 }
6232 
6233 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
6234 {
6235 	lockdep_assert_held(&ar->data_lock);
6236 
6237 	switch (ar->scan.state) {
6238 	case ATH12K_SCAN_IDLE:
6239 	case ATH12K_SCAN_RUNNING:
6240 	case ATH12K_SCAN_ABORTING:
6241 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
6242 			    ath12k_scan_state_str(ar->scan.state),
6243 			    ar->scan.state);
6244 		break;
6245 	case ATH12K_SCAN_STARTING:
6246 		complete(&ar->scan.started);
6247 		__ath12k_mac_scan_finish(ar);
6248 		break;
6249 	}
6250 }
6251 
6252 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
6253 {
6254 	lockdep_assert_held(&ar->data_lock);
6255 
6256 	switch (ar->scan.state) {
6257 	case ATH12K_SCAN_IDLE:
6258 	case ATH12K_SCAN_STARTING:
6259 		/* One suspected reason scan can be completed while starting is
6260 		 * if firmware fails to deliver all scan events to the host,
6261 		 * e.g. when transport pipe is full. This has been observed
6262 		 * with spectral scan phyerr events starving wmi transport
6263 		 * pipe. In such case the "scan completed" event should be (and
6264 		 * is) ignored by the host as it may be just firmware's scan
6265 		 * state machine recovering.
6266 		 */
6267 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
6268 			    ath12k_scan_state_str(ar->scan.state),
6269 			    ar->scan.state);
6270 		break;
6271 	case ATH12K_SCAN_RUNNING:
6272 	case ATH12K_SCAN_ABORTING:
6273 		__ath12k_mac_scan_finish(ar);
6274 		break;
6275 	}
6276 }
6277 
6278 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
6279 {
6280 	lockdep_assert_held(&ar->data_lock);
6281 
6282 	switch (ar->scan.state) {
6283 	case ATH12K_SCAN_IDLE:
6284 	case ATH12K_SCAN_STARTING:
6285 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
6286 			    ath12k_scan_state_str(ar->scan.state),
6287 			    ar->scan.state);
6288 		break;
6289 	case ATH12K_SCAN_RUNNING:
6290 	case ATH12K_SCAN_ABORTING:
6291 		ar->scan_channel = NULL;
6292 		break;
6293 	}
6294 }
6295 
6296 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
6297 {
6298 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6299 
6300 	lockdep_assert_held(&ar->data_lock);
6301 
6302 	switch (ar->scan.state) {
6303 	case ATH12K_SCAN_IDLE:
6304 	case ATH12K_SCAN_STARTING:
6305 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
6306 			    ath12k_scan_state_str(ar->scan.state),
6307 			    ar->scan.state);
6308 		break;
6309 	case ATH12K_SCAN_RUNNING:
6310 	case ATH12K_SCAN_ABORTING:
6311 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
6312 
6313 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
6314 			complete(&ar->scan.on_channel);
6315 
6316 		break;
6317 	}
6318 }
6319 
6320 static const char *
6321 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
6322 			       enum wmi_scan_completion_reason reason)
6323 {
6324 	switch (type) {
6325 	case WMI_SCAN_EVENT_STARTED:
6326 		return "started";
6327 	case WMI_SCAN_EVENT_COMPLETED:
6328 		switch (reason) {
6329 		case WMI_SCAN_REASON_COMPLETED:
6330 			return "completed";
6331 		case WMI_SCAN_REASON_CANCELLED:
6332 			return "completed [cancelled]";
6333 		case WMI_SCAN_REASON_PREEMPTED:
6334 			return "completed [preempted]";
6335 		case WMI_SCAN_REASON_TIMEDOUT:
6336 			return "completed [timedout]";
6337 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
6338 			return "completed [internal err]";
6339 		case WMI_SCAN_REASON_MAX:
6340 			break;
6341 		}
6342 		return "completed [unknown]";
6343 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6344 		return "bss channel";
6345 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6346 		return "foreign channel";
6347 	case WMI_SCAN_EVENT_DEQUEUED:
6348 		return "dequeued";
6349 	case WMI_SCAN_EVENT_PREEMPTED:
6350 		return "preempted";
6351 	case WMI_SCAN_EVENT_START_FAILED:
6352 		return "start failed";
6353 	case WMI_SCAN_EVENT_RESTARTED:
6354 		return "restarted";
6355 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6356 		return "foreign channel exit";
6357 	default:
6358 		return "unknown";
6359 	}
6360 }
6361 
6362 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
6363 			       struct wmi_scan_event *scan_evt_param)
6364 {
6365 	const void **tb;
6366 	const struct wmi_scan_event *ev;
6367 	int ret;
6368 
6369 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6370 	if (IS_ERR(tb)) {
6371 		ret = PTR_ERR(tb);
6372 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6373 		return ret;
6374 	}
6375 
6376 	ev = tb[WMI_TAG_SCAN_EVENT];
6377 	if (!ev) {
6378 		ath12k_warn(ab, "failed to fetch scan ev");
6379 		kfree(tb);
6380 		return -EPROTO;
6381 	}
6382 
6383 	scan_evt_param->event_type = ev->event_type;
6384 	scan_evt_param->reason = ev->reason;
6385 	scan_evt_param->channel_freq = ev->channel_freq;
6386 	scan_evt_param->scan_req_id = ev->scan_req_id;
6387 	scan_evt_param->scan_id = ev->scan_id;
6388 	scan_evt_param->vdev_id = ev->vdev_id;
6389 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
6390 
6391 	kfree(tb);
6392 	return 0;
6393 }
6394 
6395 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
6396 					   struct wmi_peer_sta_kickout_arg *arg)
6397 {
6398 	const void **tb;
6399 	const struct wmi_peer_sta_kickout_event *ev;
6400 	int ret;
6401 
6402 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6403 	if (IS_ERR(tb)) {
6404 		ret = PTR_ERR(tb);
6405 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6406 		return ret;
6407 	}
6408 
6409 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
6410 	if (!ev) {
6411 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
6412 		kfree(tb);
6413 		return -EPROTO;
6414 	}
6415 
6416 	arg->mac_addr = ev->peer_macaddr.addr;
6417 
6418 	kfree(tb);
6419 	return 0;
6420 }
6421 
6422 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
6423 			       struct wmi_roam_event *roam_ev)
6424 {
6425 	const void **tb;
6426 	const struct wmi_roam_event *ev;
6427 	int ret;
6428 
6429 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6430 	if (IS_ERR(tb)) {
6431 		ret = PTR_ERR(tb);
6432 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6433 		return ret;
6434 	}
6435 
6436 	ev = tb[WMI_TAG_ROAM_EVENT];
6437 	if (!ev) {
6438 		ath12k_warn(ab, "failed to fetch roam ev");
6439 		kfree(tb);
6440 		return -EPROTO;
6441 	}
6442 
6443 	roam_ev->vdev_id = ev->vdev_id;
6444 	roam_ev->reason = ev->reason;
6445 	roam_ev->rssi = ev->rssi;
6446 
6447 	kfree(tb);
6448 	return 0;
6449 }
6450 
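/* Map a center frequency to its flattened channel index across all
 * registered bands, skipping channels outside the radio's supported
 * frequency range (ar->freq_range).
 */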
6451 static int freq_to_idx(struct ath12k *ar, int freq)
6452 {
6453 	struct ieee80211_supported_band *sband;
6454 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6455 	int band, ch, idx = 0;
6456 
6457 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6458 		if (!ar->mac.sbands[band].channels)
6459 			continue;
6460 
6461 		sband = hw->wiphy->bands[band];
6462 		if (!sband)
6463 			continue;
6464 
6465 		for (ch = 0; ch < sband->n_channels; ch++, idx++) {
6466 			if (sband->channels[ch].center_freq <
6467 			    KHZ_TO_MHZ(ar->freq_range.start_freq) ||
6468 			    sband->channels[ch].center_freq >
6469 			    KHZ_TO_MHZ(ar->freq_range.end_freq))
6470 				continue;
6471 
6472 			if (sband->channels[ch].center_freq == freq)
6473 				goto exit;
6474 		}
6475 	}
6476 
6477 exit:
6478 	return idx;
6479 }
6480 
6481 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6482 				    struct wmi_chan_info_event *ch_info_ev)
6483 {
6484 	const void **tb;
6485 	const struct wmi_chan_info_event *ev;
6486 	int ret;
6487 
6488 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6489 	if (IS_ERR(tb)) {
6490 		ret = PTR_ERR(tb);
6491 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6492 		return ret;
6493 	}
6494 
6495 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
6496 	if (!ev) {
6497 		ath12k_warn(ab, "failed to fetch chan info ev");
6498 		kfree(tb);
6499 		return -EPROTO;
6500 	}
6501 
6502 	ch_info_ev->err_code = ev->err_code;
6503 	ch_info_ev->freq = ev->freq;
6504 	ch_info_ev->cmd_flags = ev->cmd_flags;
6505 	ch_info_ev->noise_floor = ev->noise_floor;
6506 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
6507 	ch_info_ev->cycle_count = ev->cycle_count;
6508 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
6509 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
6510 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
6511 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
6512 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
6513 	ch_info_ev->vdev_id = ev->vdev_id;
6514 
6515 	kfree(tb);
6516 	return 0;
6517 }
6518 
6519 static int
6520 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6521 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
6522 {
6523 	const void **tb;
6524 	const struct wmi_pdev_bss_chan_info_event *ev;
6525 	int ret;
6526 
6527 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6528 	if (IS_ERR(tb)) {
6529 		ret = PTR_ERR(tb);
6530 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6531 		return ret;
6532 	}
6533 
6534 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
6535 	if (!ev) {
6536 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
6537 		kfree(tb);
6538 		return -EPROTO;
6539 	}
6540 
6541 	bss_ch_info_ev->pdev_id = ev->pdev_id;
6542 	bss_ch_info_ev->freq = ev->freq;
6543 	bss_ch_info_ev->noise_floor = ev->noise_floor;
6544 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
6545 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
6546 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
6547 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
6548 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
6549 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
6550 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
6551 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
6552 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
6553 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
6554 
6555 	kfree(tb);
6556 	return 0;
6557 }
6558 
6559 static int
6560 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
6561 				      struct wmi_vdev_install_key_complete_arg *arg)
6562 {
6563 	const void **tb;
6564 	const struct wmi_vdev_install_key_compl_event *ev;
6565 	int ret;
6566 
6567 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6568 	if (IS_ERR(tb)) {
6569 		ret = PTR_ERR(tb);
6570 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6571 		return ret;
6572 	}
6573 
6574 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
6575 	if (!ev) {
6576 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
6577 		kfree(tb);
6578 		return -EPROTO;
6579 	}
6580 
6581 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6582 	arg->macaddr = ev->peer_macaddr.addr;
6583 	arg->key_idx = le32_to_cpu(ev->key_idx);
6584 	arg->key_flags = le32_to_cpu(ev->key_flags);
6585 	arg->status = le32_to_cpu(ev->status);
6586 
6587 	kfree(tb);
6588 	return 0;
6589 }
6590 
6591 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6592 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6593 {
6594 	const void **tb;
6595 	const struct wmi_peer_assoc_conf_event *ev;
6596 	int ret;
6597 
6598 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6599 	if (IS_ERR(tb)) {
6600 		ret = PTR_ERR(tb);
6601 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6602 		return ret;
6603 	}
6604 
6605 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6606 	if (!ev) {
6607 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6608 		kfree(tb);
6609 		return -EPROTO;
6610 	}
6611 
6612 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6613 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6614 
6615 	kfree(tb);
6616 	return 0;
6617 }
6618 
6619 static int
6620 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6621 			 const struct wmi_pdev_temperature_event *ev)
6622 {
6623 	const void **tb;
6624 	int ret;
6625 
6626 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6627 	if (IS_ERR(tb)) {
6628 		ret = PTR_ERR(tb);
6629 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6630 		return ret;
6631 	}
6632 
6633 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6634 	if (!ev) {
6635 		ath12k_warn(ab, "failed to fetch pdev temp ev");
6636 		kfree(tb);
6637 		return -EPROTO;
6638 	}
6639 
6640 	kfree(tb);
6641 	return 0;
6642 }
6643 
6644 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6645 {
6646 	/* try to send pending beacons first. they take priority */
6647 	wake_up(&ab->wmi_ab.tx_credits_wq);
6648 }
6649 
6650 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6651 {
6652 	const struct wmi_11d_new_cc_event *ev;
6653 	struct ath12k *ar;
6654 	struct ath12k_pdev *pdev;
6655 	const void **tb;
6656 	int ret, i;
6657 
6658 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6659 	if (IS_ERR(tb)) {
6660 		ret = PTR_ERR(tb);
6661 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6662 		return ret;
6663 	}
6664 
6665 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6666 	if (!ev) {
6667 		kfree(tb);
6668 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6669 		return -EPROTO;
6670 	}
6671 
6672 	spin_lock_bh(&ab->base_lock);
6673 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6674 	spin_unlock_bh(&ab->base_lock);
6675 
6676 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6677 		   ab->new_alpha2[0],
6678 		   ab->new_alpha2[1]);
6679 
6680 	kfree(tb);
6681 
6682 	for (i = 0; i < ab->num_radios; i++) {
6683 		pdev = &ab->pdevs[i];
6684 		ar = pdev->ar;
6685 		ar->state_11d = ATH12K_11D_IDLE;
6686 		ar->ah->regd_updated = false;
6687 		complete(&ar->completed_11d_scan);
6688 	}
6689 
6690 	queue_work(ab->workqueue, &ab->update_11d_work);
6691 
6692 	return 0;
6693 }
6694 
6695 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6696 				       struct sk_buff *skb)
6697 {
6698 	dev_kfree_skb(skb);
6699 }
6700 
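/*
 * Summary of the regulatory channel-list flow below:
 * ATH12K_REG_STATUS_FALLBACK frees the new reg_info and keeps the old
 * regd, ATH12K_REG_STATUS_DROP also keeps the old regd but treats the
 * event as handled (returns VALID), and otherwise the validated reg_info
 * is cached in ab->reg_info[pdev_idx] before a new regd is built from it.
 */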
6701 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6702 {
6703 	struct ath12k_reg_info *reg_info;
6704 	struct ath12k *ar = NULL;
6705 	u8 pdev_idx = 255;
6706 	int ret;
6707 
6708 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6709 	if (!reg_info) {
6710 		ret = -ENOMEM;
6711 		goto fallback;
6712 	}
6713 
6714 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6715 	if (ret) {
6716 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6717 		goto mem_free;
6718 	}
6719 
6720 	ret = ath12k_reg_validate_reg_info(ab, reg_info);
6721 	if (ret == ATH12K_REG_STATUS_FALLBACK) {
6722 		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
6723 		/* firmware has successfully switched to the new regd but the host
6724 		 * cannot continue, so free the reg_info and fall back to the old regd
6725 		 */
6726 		goto mem_free;
6727 	} else if (ret == ATH12K_REG_STATUS_DROP) {
6728 		/* reg info is valid but we will neither store it nor
6729 		 * create a new regd for it
6730 		 */
6731 		ret = ATH12K_REG_STATUS_VALID;
6732 		goto mem_free;
6733 	}
6734 
6735 	/* free the old reg_info if it exists */
6736 	pdev_idx = reg_info->phy_id;
6737 	if (ab->reg_info[pdev_idx]) {
6738 		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
6739 		kfree(ab->reg_info[pdev_idx]);
6740 	}
6741 	/* reg_info is valid; store it for later use even if
6742 	 * the regd build below fails
6743 	 */
6744 	ab->reg_info[pdev_idx] = reg_info;
6745 
6746 	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
6747 					  IEEE80211_REG_UNSET_AP);
6748 	if (ret) {
6749 		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
6750 		goto fallback;
6751 	}
6752 
6753 	goto out;
6754 
6755 mem_free:
6756 	ath12k_reg_reset_reg_info(reg_info);
6757 	kfree(reg_info);
6758 
6759 	if (ret == ATH12K_REG_STATUS_VALID)
6760 		goto out;
6761 
6762 fallback:
6763 	/* Fall back to the older regd (by sending the previous country
6764 	 * setting again) if the fw has succeeded and we failed to process
6765 	 * it here. The regdomain should be uniform across driver and fw; since
6766 	 * the fw has processed the command and sent a success status, we expect
6767 	 * this function to succeed as well. If it doesn't, CTRY needs to be
6768 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6769 	 */
6770 	/* TODO: This is rare, but still should also be handled */
6771 	WARN_ON(1);
6772 
6773 out:
6774 	/* In some error cases, even a valid pdev_idx might not be available */
6775 	if (pdev_idx != 255)
6776 		ar = ab->pdevs[pdev_idx].ar;
6777 
6778 	/* During the boot-time update, 'ar' might not be allocated,
6779 	 * so the completion cannot be marked at that point.
6780 	 * This boot-time update is handled in ath12k_mac_hw_register()
6781 	 * before registering the hardware.
6782 	 */
6783 	if (ar)
6784 		complete_all(&ar->regd_update_completed);
6785 
6786 	return ret;
6787 }
6788 
6789 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6790 				const void *ptr, void *data)
6791 {
6792 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6793 	struct wmi_ready_event fixed_param;
6794 	struct ath12k_wmi_mac_addr_params *addr_list;
6795 	struct ath12k_pdev *pdev;
6796 	u32 num_mac_addr;
6797 	int i;
6798 
6799 	switch (tag) {
6800 	case WMI_TAG_READY_EVENT:
6801 		memset(&fixed_param, 0, sizeof(fixed_param));
6802 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6803 		       min_t(u16, sizeof(fixed_param), len));
6804 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6805 		rdy_parse->num_extra_mac_addr =
6806 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6807 
6808 		ether_addr_copy(ab->mac_addr,
6809 				fixed_param.ready_event_min.mac_addr.addr);
6810 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6811 		ab->wmi_ready = true;
6812 		break;
6813 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6814 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6815 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6816 
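		/* Use the extra MAC address list only on multi-radio chips
		 * and only when the fw supplied at least one address per
		 * radio; otherwise pdevs_macaddr_valid stays false and the
		 * per-pdev addresses fall back to being derived from the
		 * base MAC address.
		 */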
6817 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6818 			break;
6819 
6820 		for (i = 0; i < ab->num_radios; i++) {
6821 			pdev = &ab->pdevs[i];
6822 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6823 		}
6824 		ab->pdevs_macaddr_valid = true;
6825 		break;
6826 	default:
6827 		break;
6828 	}
6829 
6830 	return 0;
6831 }
6832 
6833 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6834 {
6835 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6836 	int ret;
6837 
6838 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6839 				  ath12k_wmi_rdy_parse, &rdy_parse);
6840 	if (ret) {
6841 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6842 		return ret;
6843 	}
6844 
6845 	complete(&ab->wmi_ab.unified_ready);
6846 	return 0;
6847 }
6848 
6849 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6850 {
6851 	struct wmi_peer_delete_resp_event peer_del_resp;
6852 	struct ath12k *ar;
6853 
6854 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6855 		ath12k_warn(ab, "failed to extract peer delete resp");
6856 		return;
6857 	}
6858 
6859 	rcu_read_lock();
6860 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6861 	if (!ar) {
6862 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6863 			    peer_del_resp.vdev_id);
6864 		rcu_read_unlock();
6865 		return;
6866 	}
6867 
6868 	complete(&ar->peer_delete_done);
6869 	rcu_read_unlock();
6870 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6871 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6872 }
6873 
6874 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6875 					  struct sk_buff *skb)
6876 {
6877 	struct ath12k *ar;
6878 	u32 vdev_id = 0;
6879 
6880 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6881 		ath12k_warn(ab, "failed to extract vdev delete resp");
6882 		return;
6883 	}
6884 
6885 	rcu_read_lock();
6886 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6887 	if (!ar) {
6888 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6889 			    vdev_id);
6890 		rcu_read_unlock();
6891 		return;
6892 	}
6893 
6894 	complete(&ar->vdev_delete_done);
6895 
6896 	rcu_read_unlock();
6897 
6898 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6899 		   vdev_id);
6900 }
6901 
6902 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6903 {
6904 	switch (vdev_resp_status) {
6905 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6906 		return "invalid vdev id";
6907 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6908 		return "not supported";
6909 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6910 		return "dfs violation";
6911 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6912 		return "invalid regdomain";
6913 	default:
6914 		return "unknown";
6915 	}
6916 }
6917 
6918 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6919 {
6920 	struct wmi_vdev_start_resp_event vdev_start_resp;
6921 	struct ath12k *ar;
6922 	u32 status;
6923 
6924 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6925 		ath12k_warn(ab, "failed to extract vdev start resp");
6926 		return;
6927 	}
6928 
6929 	rcu_read_lock();
6930 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6931 	if (!ar) {
6932 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6933 			    vdev_start_resp.vdev_id);
6934 		rcu_read_unlock();
6935 		return;
6936 	}
6937 
6938 	ar->last_wmi_vdev_start_status = 0;
6939 
6940 	status = le32_to_cpu(vdev_start_resp.status);
6941 	if (WARN_ON_ONCE(status)) {
6942 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6943 			    status, ath12k_wmi_vdev_resp_print(status));
6944 		ar->last_wmi_vdev_start_status = status;
6945 	}
6946 
6947 	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
6948 
6949 	complete(&ar->vdev_setup_done);
6950 
6951 	rcu_read_unlock();
6952 
6953 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6954 		   vdev_start_resp.vdev_id);
6955 }
6956 
6957 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6958 {
6959 	u32 vdev_id, tx_status;
6960 
6961 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6962 		ath12k_warn(ab, "failed to extract bcn tx status");
6963 		return;
6964 	}
6965 }
6966 
6967 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6968 {
6969 	struct ath12k *ar;
6970 	u32 vdev_id = 0;
6971 
6972 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6973 		ath12k_warn(ab, "failed to extract vdev stopped event");
6974 		return;
6975 	}
6976 
6977 	rcu_read_lock();
6978 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6979 	if (!ar) {
6980 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6981 			    vdev_id);
6982 		rcu_read_unlock();
6983 		return;
6984 	}
6985 
6986 	complete(&ar->vdev_setup_done);
6987 
6988 	rcu_read_unlock();
6989 
6990 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6991 }
6992 
6993 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6994 {
6995 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6996 	struct ath12k *ar;
6997 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6998 	struct ieee80211_hdr *hdr;
6999 	u16 fc;
7000 	struct ieee80211_supported_band *sband;
7001 	s32 noise_floor;
7002 
7003 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
7004 		ath12k_warn(ab, "failed to extract mgmt rx event");
7005 		dev_kfree_skb(skb);
7006 		return;
7007 	}
7008 
7009 	memset(status, 0, sizeof(*status));
7010 
7011 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
7012 		   rx_ev.status);
7013 
7014 	rcu_read_lock();
7015 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
7016 
7017 	if (!ar) {
7018 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
7019 			    rx_ev.pdev_id);
7020 		dev_kfree_skb(skb);
7021 		goto exit;
7022 	}
7023 
7024 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
7025 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
7026 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
7027 			     WMI_RX_STATUS_ERR_CRC))) {
7028 		dev_kfree_skb(skb);
7029 		goto exit;
7030 	}
7031 
7032 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
7033 		status->flag |= RX_FLAG_MMIC_ERROR;
7034 
7035 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
7036 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
7037 		status->band = NL80211_BAND_6GHZ;
7038 		status->freq = rx_ev.chan_freq;
7039 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
7040 		status->band = NL80211_BAND_2GHZ;
7041 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
7042 		status->band = NL80211_BAND_5GHZ;
7043 	} else {
7044 		/* Shouldn't happen unless the list of channels advertised to
7045 		 * mac80211 has been changed.
7046 		 */
7047 		WARN_ON_ONCE(1);
7048 		dev_kfree_skb(skb);
7049 		goto exit;
7050 	}
7051 
7052 	if (rx_ev.phy_mode == MODE_11B &&
7053 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
7054 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7055 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
7056 
7057 	sband = &ar->mac.sbands[status->band];
7058 
7059 	if (status->band != NL80211_BAND_6GHZ)
7060 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
7061 							      status->band);
7062 
7063 	spin_lock_bh(&ar->data_lock);
7064 	noise_floor = ath12k_pdev_get_noise_floor(ar);
7065 	spin_unlock_bh(&ar->data_lock);
7066 
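	/* rx_ev.snr is relative to the pdev noise floor, so e.g. an SNR of
	 * 40 dB over a -95 dBm floor is reported as a -55 dBm signal.
	 */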
7067 	status->signal = rx_ev.snr + noise_floor;
7068 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
7069 
7070 	hdr = (struct ieee80211_hdr *)skb->data;
7071 	fc = le16_to_cpu(hdr->frame_control);
7072 
7073 	/* Firmware is guaranteed to report all essential management frames via
7074 	 * WMI while it can deliver some extra via HTT. Since there can be
7075 	 * duplicates, split the reporting w.r.t. monitor/sniffing.
7076 	 */
7077 	status->flag |= RX_FLAG_SKIP_MONITOR;
7078 
7079 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
7080 	 * including group privacy action frames.
7081 	 */
7082 	if (ieee80211_has_protected(hdr->frame_control)) {
7083 		status->flag |= RX_FLAG_DECRYPTED;
7084 
7085 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
7086 			status->flag |= RX_FLAG_IV_STRIPPED |
7087 					RX_FLAG_MMIC_STRIPPED;
7088 			hdr->frame_control = __cpu_to_le16(fc &
7089 					     ~IEEE80211_FCTL_PROTECTED);
7090 		}
7091 	}
7092 
7093 	if (ieee80211_is_beacon(hdr->frame_control))
7094 		ath12k_mac_handle_beacon(ar, skb);
7095 
7096 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7097 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
7098 		   skb, skb->len,
7099 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
7100 
7101 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7102 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
7103 		   status->freq, status->band, status->signal,
7104 		   status->rate_idx);
7105 
7106 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
7107 
7108 exit:
7109 	rcu_read_unlock();
7110 }
7111 
7112 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
7113 {
7114 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
7115 	struct ath12k *ar;
7116 
7117 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
7118 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
7119 		return;
7120 	}
7121 
7122 	rcu_read_lock();
7123 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
7124 	if (!ar) {
7125 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
7126 			    tx_compl_param.pdev_id);
7127 		goto exit;
7128 	}
7129 
7130 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
7131 				 le32_to_cpu(tx_compl_param.status),
7132 				 le32_to_cpu(tx_compl_param.ack_rssi));
7133 
7134 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7135 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
7136 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
7137 		   tx_compl_param.status);
7138 
7139 exit:
7140 	rcu_read_unlock();
7141 }
7142 
7143 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
7144 						  u32 vdev_id,
7145 						  enum ath12k_scan_state state)
7146 {
7147 	int i;
7148 	struct ath12k_pdev *pdev;
7149 	struct ath12k *ar;
7150 
7151 	for (i = 0; i < ab->num_radios; i++) {
7152 		pdev = rcu_dereference(ab->pdevs_active[i]);
7153 		if (pdev && pdev->ar) {
7154 			ar = pdev->ar;
7155 
7156 			spin_lock_bh(&ar->data_lock);
7157 			if (ar->scan.state == state &&
7158 			    ar->scan.arvif &&
7159 			    ar->scan.arvif->vdev_id == vdev_id) {
7160 				spin_unlock_bh(&ar->data_lock);
7161 				return ar;
7162 			}
7163 			spin_unlock_bh(&ar->data_lock);
7164 		}
7165 	}
7166 	return NULL;
7167 }
7168 
7169 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
7170 {
7171 	struct ath12k *ar;
7172 	struct wmi_scan_event scan_ev = {0};
7173 
7174 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
7175 		ath12k_warn(ab, "failed to extract scan event");
7176 		return;
7177 	}
7178 
7179 	rcu_read_lock();
7180 
7181 	/* In case the scan was cancelled, e.g. during interface teardown,
7182 	 * the interface will not be found among the active interfaces.
7183 	 * In such scenarios, instead iterate over the active pdevs and
7184 	 * search for the 'ar' whose scan state is ABORTING and whose
7185 	 * aborting scan's vdev id matches this event's info.
7186 	 */
7187 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
7188 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
7189 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7190 						 ATH12K_SCAN_ABORTING);
7191 		if (!ar)
7192 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7193 							 ATH12K_SCAN_RUNNING);
7194 	} else {
7195 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
7196 	}
7197 
7198 	if (!ar) {
7199 		ath12k_warn(ab, "Received scan event for unknown vdev");
7200 		rcu_read_unlock();
7201 		return;
7202 	}
7203 
7204 	spin_lock_bh(&ar->data_lock);
7205 
7206 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7207 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7208 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
7209 						  le32_to_cpu(scan_ev.reason)),
7210 		   le32_to_cpu(scan_ev.event_type),
7211 		   le32_to_cpu(scan_ev.reason),
7212 		   le32_to_cpu(scan_ev.channel_freq),
7213 		   le32_to_cpu(scan_ev.scan_req_id),
7214 		   le32_to_cpu(scan_ev.scan_id),
7215 		   le32_to_cpu(scan_ev.vdev_id),
7216 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
7217 
7218 	switch (le32_to_cpu(scan_ev.event_type)) {
7219 	case WMI_SCAN_EVENT_STARTED:
7220 		ath12k_wmi_event_scan_started(ar);
7221 		break;
7222 	case WMI_SCAN_EVENT_COMPLETED:
7223 		ath12k_wmi_event_scan_completed(ar);
7224 		break;
7225 	case WMI_SCAN_EVENT_BSS_CHANNEL:
7226 		ath12k_wmi_event_scan_bss_chan(ar);
7227 		break;
7228 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
7229 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
7230 		break;
7231 	case WMI_SCAN_EVENT_START_FAILED:
7232 		ath12k_warn(ab, "received scan start failure event\n");
7233 		ath12k_wmi_event_scan_start_failed(ar);
7234 		break;
7235 	case WMI_SCAN_EVENT_DEQUEUED:
7236 		__ath12k_mac_scan_finish(ar);
7237 		break;
7238 	case WMI_SCAN_EVENT_PREEMPTED:
7239 	case WMI_SCAN_EVENT_RESTARTED:
7240 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7241 	default:
7242 		break;
7243 	}
7244 
7245 	spin_unlock_bh(&ar->data_lock);
7246 
7247 	rcu_read_unlock();
7248 }
7249 
7250 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
7251 {
7252 	struct wmi_peer_sta_kickout_arg arg = {};
7253 	struct ieee80211_sta *sta;
7254 	struct ath12k_peer *peer;
7255 	struct ath12k *ar;
7256 
7257 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7258 		ath12k_warn(ab, "failed to extract peer sta kickout event");
7259 		return;
7260 	}
7261 
7262 	rcu_read_lock();
7263 
7264 	spin_lock_bh(&ab->base_lock);
7265 
7266 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
7267 
7268 	if (!peer) {
7269 		ath12k_warn(ab, "peer not found %pM\n",
7270 			    arg.mac_addr);
7271 		goto exit;
7272 	}
7273 
7274 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
7275 	if (!ar) {
7276 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
7277 			    peer->vdev_id);
7278 		goto exit;
7279 	}
7280 
7281 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7282 					   arg.mac_addr, NULL);
7283 	if (!sta) {
7284 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
7285 			    arg.mac_addr);
7286 		goto exit;
7287 	}
7288 
7289 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
7290 		   arg.mac_addr);
7291 
7292 	ieee80211_report_low_ack(sta, 10);
7293 
7294 exit:
7295 	spin_unlock_bh(&ab->base_lock);
7296 	rcu_read_unlock();
7297 }
7298 
7299 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
7300 {
7301 	struct wmi_roam_event roam_ev = {};
7302 	struct ath12k *ar;
7303 	u32 vdev_id;
7304 	u8 roam_reason;
7305 
7306 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
7307 		ath12k_warn(ab, "failed to extract roam event");
7308 		return;
7309 	}
7310 
7311 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
7312 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
7313 				   WMI_ROAM_REASON_MASK);
7314 
7315 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7316 		   "wmi roam event vdev %u reason %d rssi %d\n",
7317 		   vdev_id, roam_reason, roam_ev.rssi);
7318 
7319 	rcu_read_lock();
7320 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7321 	if (!ar) {
7322 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
7323 		rcu_read_unlock();
7324 		return;
7325 	}
7326 
7327 	if (roam_reason >= WMI_ROAM_REASON_MAX)
7328 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
7329 			    roam_reason, vdev_id);
7330 
7331 	switch (roam_reason) {
7332 	case WMI_ROAM_REASON_BEACON_MISS:
7333 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
7334 		break;
7335 	case WMI_ROAM_REASON_BETTER_AP:
7336 	case WMI_ROAM_REASON_LOW_RSSI:
7337 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
7338 	case WMI_ROAM_REASON_HO_FAILED:
7339 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
7340 			    roam_reason, vdev_id);
7341 		break;
7342 	}
7343 
7344 	rcu_read_unlock();
7345 }
7346 
7347 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7348 {
7349 	struct wmi_chan_info_event ch_info_ev = {0};
7350 	struct ath12k *ar;
7351 	struct survey_info *survey;
7352 	int idx;
7353 	/* HW channel counters frequency value in hertz */
7354 	u32 cc_freq_hz = ab->cc_freq_hz;
7355 
7356 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
7357 		ath12k_warn(ab, "failed to extract chan info event");
7358 		return;
7359 	}
7360 
7361 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7362 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
7363 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
7364 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
7365 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
7366 		   ch_info_ev.mac_clk_mhz);
7367 
7368 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
7369 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
7370 		return;
7371 	}
7372 
7373 	rcu_read_lock();
7374 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
7375 	if (!ar) {
7376 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
7377 			    ch_info_ev.vdev_id);
7378 		rcu_read_unlock();
7379 		return;
7380 	}
7381 	spin_lock_bh(&ar->data_lock);
7382 
7383 	switch (ar->scan.state) {
7384 	case ATH12K_SCAN_IDLE:
7385 	case ATH12K_SCAN_STARTING:
7386 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
7387 		goto exit;
7388 	case ATH12K_SCAN_RUNNING:
7389 	case ATH12K_SCAN_ABORTING:
7390 		break;
7391 	}
7392 
7393 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
7394 	if (idx >= ARRAY_SIZE(ar->survey)) {
7395 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7396 			    ch_info_ev.freq, idx);
7397 		goto exit;
7398 	}
7399 
7400 	/* If the FW provides the MAC clock frequency in MHz, override the
7401 	 * initialized HW channel counters frequency value
7402 	 */
7403 	if (ch_info_ev.mac_clk_mhz)
7404 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
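	/* Unit note: MHz * 1000 yields kHz, so dividing the cycle counters
	 * below by cc_freq_hz produces the millisecond values the survey
	 * time fields expect.
	 */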
7405 
7406 	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7407 		survey = &ar->survey[idx];
7408 		memset(survey, 0, sizeof(*survey));
7409 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
7410 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7411 				 SURVEY_INFO_TIME_BUSY;
7412 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
7413 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
7414 					    cc_freq_hz);
7415 	}
7416 exit:
7417 	spin_unlock_bh(&ar->data_lock);
7418 	rcu_read_unlock();
7419 }
7420 
7421 static void
7422 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7423 {
7424 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7425 	struct survey_info *survey;
7426 	struct ath12k *ar;
7427 	u32 cc_freq_hz = ab->cc_freq_hz;
7428 	u64 busy, total, tx, rx, rx_bss;
7429 	int idx;
7430 
7431 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7432 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
7433 		return;
7434 	}
7435 
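	/* The fw splits each 64-bit cycle counter into two 32-bit words;
	 * reassemble them as ((u64)high << 32) | low before deriving the
	 * survey times.
	 */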
7436 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
7437 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
7438 
7439 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
7440 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
7441 
7442 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
7443 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
7444 
7445 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
7446 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
7447 
7448 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
7449 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
7450 
7451 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7452 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7453 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7454 		   bss_ch_info_ev.noise_floor, busy, total,
7455 		   tx, rx, rx_bss);
7456 
7457 	rcu_read_lock();
7458 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
7459 
7460 	if (!ar) {
7461 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7462 			    bss_ch_info_ev.pdev_id);
7463 		rcu_read_unlock();
7464 		return;
7465 	}
7466 
7467 	spin_lock_bh(&ar->data_lock);
7468 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
7469 	if (idx >= ARRAY_SIZE(ar->survey)) {
7470 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7471 			    bss_ch_info_ev.freq, idx);
7472 		goto exit;
7473 	}
7474 
7475 	survey = &ar->survey[idx];
7476 
7477 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
7478 	survey->time      = div_u64(total, cc_freq_hz);
7479 	survey->time_busy = div_u64(busy, cc_freq_hz);
7480 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
7481 	survey->time_tx   = div_u64(tx, cc_freq_hz);
7482 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
7483 			     SURVEY_INFO_TIME |
7484 			     SURVEY_INFO_TIME_BUSY |
7485 			     SURVEY_INFO_TIME_RX |
7486 			     SURVEY_INFO_TIME_TX);
7487 exit:
7488 	spin_unlock_bh(&ar->data_lock);
7489 	complete(&ar->bss_survey_done);
7490 
7491 	rcu_read_unlock();
7492 }
7493 
7494 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
7495 						struct sk_buff *skb)
7496 {
7497 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
7498 	struct ath12k *ar;
7499 
7500 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
7501 		ath12k_warn(ab, "failed to extract install key compl event");
7502 		return;
7503 	}
7504 
7505 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7506 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
7507 		   install_key_compl.key_idx, install_key_compl.key_flags,
7508 		   install_key_compl.macaddr, install_key_compl.status);
7509 
7510 	rcu_read_lock();
7511 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
7512 	if (!ar) {
7513 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
7514 			    install_key_compl.vdev_id);
7515 		rcu_read_unlock();
7516 		return;
7517 	}
7518 
7519 	ar->install_key_status = 0;
7520 
7521 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
7522 		ath12k_warn(ab, "install key failed for %pM status %d\n",
7523 			    install_key_compl.macaddr, install_key_compl.status);
7524 		ar->install_key_status = install_key_compl.status;
7525 	}
7526 
7527 	complete(&ar->install_key_done);
7528 	rcu_read_unlock();
7529 }
7530 
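/*
 * Each service-available segment carries WMI_SERVICE_SEGMENT_BM_SIZE32
 * 32-bit words in which bit (j % 32) of word i advertises service id j.
 * Worked example for the first (ext) segment below, given that the base
 * service ids are 32-bit aligned: word 0 bit 0 maps to service
 * WMI_MAX_SERVICE, word 0 bit 1 to WMI_MAX_SERVICE + 1, and word 1 bit 0
 * to WMI_MAX_SERVICE + 32.
 */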
7531 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
7532 					  u16 tag, u16 len,
7533 					  const void *ptr,
7534 					  void *data)
7535 {
7536 	const struct wmi_service_available_event *ev;
7537 	u32 *wmi_ext2_service_bitmap;
7538 	int i, j;
7539 	u16 expected_len;
7540 
7541 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
7542 	if (len < expected_len) {
7543 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
7544 			    len, tag);
7545 		return -EINVAL;
7546 	}
7547 
7548 	switch (tag) {
7549 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
7550 		ev = (struct wmi_service_available_event *)ptr;
7551 		for (i = 0, j = WMI_MAX_SERVICE;
7552 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
7553 		     i++) {
7554 			do {
7555 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
7556 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7557 					set_bit(j, ab->wmi_ab.svc_map);
7558 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7559 		}
7560 
7561 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7562 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
7563 			   ev->wmi_service_segment_bitmap[0],
7564 			   ev->wmi_service_segment_bitmap[1],
7565 			   ev->wmi_service_segment_bitmap[2],
7566 			   ev->wmi_service_segment_bitmap[3]);
7567 		break;
7568 	case WMI_TAG_ARRAY_UINT32:
7569 		wmi_ext2_service_bitmap = (u32 *)ptr;
7570 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
7571 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
7572 		     i++) {
7573 			do {
7574 				if (wmi_ext2_service_bitmap[i] &
7575 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7576 					set_bit(j, ab->wmi_ab.svc_map);
7577 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7578 		}
7579 
7580 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7581 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
7582 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7583 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7584 		break;
7585 	}
7586 	return 0;
7587 }
7588 
7589 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7590 {
7591 	int ret;
7592 
7593 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7594 				  ath12k_wmi_tlv_services_parser,
7595 				  NULL);
7596 	return ret;
7597 }
7598 
7599 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7600 {
7601 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7602 	struct ath12k *ar;
7603 
7604 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7605 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7606 		return;
7607 	}
7608 
7609 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7610 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
7611 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7612 
7613 	rcu_read_lock();
7614 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7615 
7616 	if (!ar) {
7617 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7618 			    peer_assoc_conf.vdev_id);
7619 		rcu_read_unlock();
7620 		return;
7621 	}
7622 
7623 	complete(&ar->peer_assoc_done);
7624 	rcu_read_unlock();
7625 }
7626 
7627 static void
7628 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
7629 			      struct ath12k_fw_stats *fw_stats,
7630 			      char *buf, u32 *length)
7631 {
7632 	const struct ath12k_fw_stats_vdev *vdev;
7633 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7634 	struct ath12k_link_vif *arvif;
7635 	u32 len = *length;
7636 	u8 *vif_macaddr;
7637 	int i;
7638 
7639 	len += scnprintf(buf + len, buf_len - len, "\n");
7640 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7641 			 "ath12k VDEV stats");
7642 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7643 			 "=================");
7644 
7645 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
7646 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
7647 		if (!arvif)
7648 			continue;
7649 		vif_macaddr = arvif->ahvif->vif->addr;
7650 
7651 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7652 				 "VDEV ID", vdev->vdev_id);
7653 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7654 				 "VDEV MAC address", vif_macaddr);
7655 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7656 				 "beacon snr", vdev->beacon_snr);
7657 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7658 				 "data snr", vdev->data_snr);
7659 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7660 				 "num rx frames", vdev->num_rx_frames);
7661 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7662 				 "num rts fail", vdev->num_rts_fail);
7663 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7664 				 "num rts success", vdev->num_rts_success);
7665 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7666 				 "num rx err", vdev->num_rx_err);
7667 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7668 				 "num rx discard", vdev->num_rx_discard);
7669 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7670 				 "num tx not acked", vdev->num_tx_not_acked);
7671 
7672 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7673 			len += scnprintf(buf + len, buf_len - len,
7674 					"%25s [%02d] %u\n",
7675 					"num tx frames", i,
7676 					vdev->num_tx_frames[i]);
7677 
7678 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7679 			len += scnprintf(buf + len, buf_len - len,
7680 					"%25s [%02d] %u\n",
7681 					"num tx frames retries", i,
7682 					vdev->num_tx_frames_retries[i]);
7683 
7684 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7685 			len += scnprintf(buf + len, buf_len - len,
7686 					"%25s [%02d] %u\n",
7687 					"num tx frames failures", i,
7688 					vdev->num_tx_frames_failures[i]);
7689 
7690 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7691 			len += scnprintf(buf + len, buf_len - len,
7692 					"%25s [%02d] 0x%08x\n",
7693 					"tx rate history", i,
7694 					vdev->tx_rate_history[i]);
7695 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7696 			len += scnprintf(buf + len, buf_len - len,
7697 					"%25s [%02d] %u\n",
7698 					"beacon rssi history", i,
7699 					vdev->beacon_rssi_history[i]);
7700 
7701 		len += scnprintf(buf + len, buf_len - len, "\n");
7702 		*length = len;
7703 	}
7704 }
7705 
7706 static void
7707 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7708 			     struct ath12k_fw_stats *fw_stats,
7709 			     char *buf, u32 *length)
7710 {
7711 	const struct ath12k_fw_stats_bcn *bcn;
7712 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7713 	struct ath12k_link_vif *arvif;
7714 	u32 len = *length;
7715 	size_t num_bcn;
7716 
7717 	num_bcn = list_count_nodes(&fw_stats->bcn);
7718 
7719 	len += scnprintf(buf + len, buf_len - len, "\n");
7720 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7721 			 "ath12k Beacon stats", num_bcn);
7722 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7723 			 "===================");
7724 
7725 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7726 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7727 		if (!arvif)
7728 			continue;
7729 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7730 				 "VDEV ID", bcn->vdev_id);
7731 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7732 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7733 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7734 				 "================");
7735 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7736 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7737 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7738 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7739 
7740 		len += scnprintf(buf + len, buf_len - len, "\n");
7741 		*length = len;
7742 	}
7743 }
7744 
7745 static void
7746 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7747 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7748 {
7749 	u32 len = *length;
7750 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7751 
7752 	len += scnprintf(buf + len, buf_len - len, "\n");
7753 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7754 			"ath12k PDEV stats");
7755 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7756 			"=================");
7757 
7758 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7759 			"Channel noise floor", pdev->ch_noise_floor);
7760 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7761 			"Channel TX power", pdev->chan_tx_power);
7762 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7763 			"TX frame count", pdev->tx_frame_count);
7764 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7765 			"RX frame count", pdev->rx_frame_count);
7766 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7767 			"RX clear count", pdev->rx_clear_count);
7768 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7769 			"Cycle count", pdev->cycle_count);
7770 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7771 			"PHY error count", pdev->phy_err_count);
7772 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7773 			"soc drop count", fw_soc_drop_cnt);
7774 
7775 	*length = len;
7776 }
7777 
7778 static void
7779 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7780 				 char *buf, u32 *length)
7781 {
7782 	u32 len = *length;
7783 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7784 
7785 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7786 			 "ath12k PDEV TX stats");
7787 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7788 			 "====================");
7789 
7790 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7791 			 "HTT cookies queued", pdev->comp_queued);
7792 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7793 			 "HTT cookies disp.", pdev->comp_delivered);
7794 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7795 			 "MSDU queued", pdev->msdu_enqued);
7796 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7797 			 "MPDU queued", pdev->mpdu_enqued);
7798 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7799 			 "MSDUs dropped", pdev->wmm_drop);
7800 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7801 			 "Local enqued", pdev->local_enqued);
7802 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7803 			 "Local freed", pdev->local_freed);
7804 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7805 			 "HW queued", pdev->hw_queued);
7806 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7807 			 "PPDUs reaped", pdev->hw_reaped);
7808 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7809 			 "Num underruns", pdev->underrun);
7810 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7811 			 "PPDUs cleaned", pdev->tx_abort);
7812 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7813 			 "MPDUs requeued", pdev->mpdus_requed);
7814 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7815 			 "Excessive retries", pdev->tx_ko);
7816 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7817 			 "HW rate", pdev->data_rc);
7818 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7819 			 "Sched self triggers", pdev->self_triggers);
7820 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7821 			 "Dropped due to SW retries",
7822 			 pdev->sw_retry_failure);
7823 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7824 			 "Illegal rate phy errors",
7825 			 pdev->illgl_rate_phy_err);
7826 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7827 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7828 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7829 			 "TX timeout", pdev->pdev_tx_timeout);
7830 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7831 			 "PDEV resets", pdev->pdev_resets);
7832 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7833 			 "Stateless TIDs alloc failures",
7834 			 pdev->stateless_tid_alloc_failure);
7835 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7836 			 "PHY underrun", pdev->phy_underrun);
7837 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7838 			 "MPDU is more than txop limit", pdev->txop_ovf);
7839 	*length = len;
7840 }
7841 
7842 static void
7843 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7844 				 char *buf, u32 *length)
7845 {
7846 	u32 len = *length;
7847 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7848 
7849 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7850 			 "ath12k PDEV RX stats");
7851 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7852 			 "====================");
7853 
7854 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7855 			 "Mid PPDU route change",
7856 			 pdev->mid_ppdu_route_change);
7857 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7858 			 "Tot. number of statuses", pdev->status_rcvd);
7859 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7860 			 "Extra frags on rings 0", pdev->r0_frags);
7861 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7862 			 "Extra frags on rings 1", pdev->r1_frags);
7863 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7864 			 "Extra frags on rings 2", pdev->r2_frags);
7865 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7866 			 "Extra frags on rings 3", pdev->r3_frags);
7867 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7868 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7869 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7870 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7871 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7872 			 "MSDUs delivered to stack", pdev->loc_msdus);
7873 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7874 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7875 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7876 			 "Oversized A-MSDUs", pdev->oversize_amsdu);
7877 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7878 			 "PHY errors", pdev->phy_errs);
7879 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7880 			 "PHY errors drops", pdev->phy_err_drop);
7881 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7882 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7883 	*length = len;
7884 }
7885 
7886 static void
7887 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7888 			      struct ath12k_fw_stats *fw_stats,
7889 			      char *buf, u32 *length)
7890 {
7891 	const struct ath12k_fw_stats_pdev *pdev;
7892 	u32 len = *length;
7893 
7894 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7895 					struct ath12k_fw_stats_pdev, list);
7896 	if (!pdev) {
7897 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7898 		return;
7899 	}
7900 
7901 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7902 					   ar->ab->fw_soc_drop_count);
7903 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7904 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7905 
7906 	*length = len;
7907 }
7908 
7909 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7910 			      struct ath12k_fw_stats *fw_stats,
7911 			      u32 stats_id, char *buf)
7912 {
7913 	u32 len = 0;
7914 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7915 
7916 	spin_lock_bh(&ar->data_lock);
7917 
7918 	switch (stats_id) {
7919 	case WMI_REQUEST_VDEV_STAT:
7920 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7921 		break;
7922 	case WMI_REQUEST_BCN_STAT:
7923 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7924 		break;
7925 	case WMI_REQUEST_PDEV_STAT:
7926 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7927 		break;
7928 	default:
7929 		break;
7930 	}
7931 
7932 	spin_unlock_bh(&ar->data_lock);
7933 
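	/* Always NUL-terminate, clamping to the last byte if the dump
	 * filled the buffer completely.
	 */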
7934 	if (len >= buf_len)
7935 		buf[len - 1] = 0;
7936 	else
7937 		buf[len] = 0;
7938 
7939 	ath12k_fw_stats_reset(ar);
7940 }
7941 
7942 static void
7943 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7944 			   struct ath12k_fw_stats_vdev *dst)
7945 {
7946 	int i;
7947 
7948 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7949 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7950 	dst->data_snr = le32_to_cpu(src->data_snr);
7951 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7952 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7953 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7954 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7955 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7956 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7957 
7958 	for (i = 0; i < WLAN_MAX_AC; i++)
7959 		dst->num_tx_frames[i] =
7960 			le32_to_cpu(src->num_tx_frames[i]);
7961 
7962 	for (i = 0; i < WLAN_MAX_AC; i++)
7963 		dst->num_tx_frames_retries[i] =
7964 			le32_to_cpu(src->num_tx_frames_retries[i]);
7965 
7966 	for (i = 0; i < WLAN_MAX_AC; i++)
7967 		dst->num_tx_frames_failures[i] =
7968 			le32_to_cpu(src->num_tx_frames_failures[i]);
7969 
7970 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7971 		dst->tx_rate_history[i] =
7972 			le32_to_cpu(src->tx_rate_history[i]);
7973 
7974 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7975 		dst->beacon_rssi_history[i] =
7976 			le32_to_cpu(src->beacon_rssi_history[i]);
7977 }
7978 
7979 static void
7980 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7981 			  struct ath12k_fw_stats_bcn *dst)
7982 {
7983 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7984 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7985 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7986 }
7987 
7988 static void
7989 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7990 				struct ath12k_fw_stats_pdev *dst)
7991 {
7992 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7993 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7994 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7995 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7996 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7997 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7998 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7999 }
8000 
8001 static void
8002 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
8003 			      struct ath12k_fw_stats_pdev *dst)
8004 {
8005 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
8006 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
8007 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
8008 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
8009 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
8010 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
8011 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
8012 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
8013 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
8014 	dst->underrun = a_sle32_to_cpu(src->underrun);
8015 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
8016 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
8017 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
8018 	dst->data_rc = __le32_to_cpu(src->data_rc);
8019 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
8020 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
8021 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
8022 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
8023 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
8024 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
8025 	dst->stateless_tid_alloc_failure =
8026 		__le32_to_cpu(src->stateless_tid_alloc_failure);
8027 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
8028 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
8029 }
8030 
8031 static void
8032 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
8033 			      struct ath12k_fw_stats_pdev *dst)
8034 {
8035 	dst->mid_ppdu_route_change =
8036 		a_sle32_to_cpu(src->mid_ppdu_route_change);
8037 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
8038 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
8039 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
8040 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
8041 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
8042 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
8043 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
8044 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
8045 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
8046 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
8047 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
8048 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
8049 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
8050 }
8051 
8052 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
8053 					      struct wmi_tlv_fw_stats_parse *parse,
8054 					      const void *ptr,
8055 					      u16 len)
8056 {
8057 	const struct wmi_stats_event *ev = parse->ev;
8058 	struct ath12k_fw_stats *stats = parse->stats;
8059 	struct ath12k *ar;
8060 	struct ath12k_link_vif *arvif;
8061 	struct ieee80211_sta *sta;
8062 	struct ath12k_sta *ahsta;
8063 	struct ath12k_link_sta *arsta;
8064 	int i, ret = 0;
8065 	const void *data = ptr;
8066 
8067 	if (!ev) {
8068 		ath12k_warn(ab, "failed to fetch update stats ev");
8069 		return -EPROTO;
8070 	}
8071 
8072 	if (!stats)
8073 		return -EINVAL;
8074 
8075 	rcu_read_lock();
8076 
8077 	stats->pdev_id = le32_to_cpu(ev->pdev_id);
8078 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
8079 	if (!ar) {
8080 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
8081 			    le32_to_cpu(ev->pdev_id));
8082 		ret = -EPROTO;
8083 		goto exit;
8084 	}
8085 
8086 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
8087 		const struct wmi_vdev_stats_params *src;
8088 		struct ath12k_fw_stats_vdev *dst;
8089 
8090 		src = data;
8091 		if (len < sizeof(*src)) {
8092 			ret = -EPROTO;
8093 			goto exit;
8094 		}
8095 
8096 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
8097 		if (arvif) {
8098 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
8099 							   arvif->bssid,
8100 							   NULL);
8101 			if (sta) {
8102 				ahsta = ath12k_sta_to_ahsta(sta);
8103 				arsta = &ahsta->deflink;
8104 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
8105 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8106 					   "wmi stats vdev id %d snr %d\n",
8107 					   src->vdev_id, src->beacon_snr);
8108 			} else {
8109 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8110 					   "station with bssid %pM not found for vdev stat\n",
8111 					   arvif->bssid);
8112 			}
8113 		}
8114 
8115 		data += sizeof(*src);
8116 		len -= sizeof(*src);
8117 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8118 		if (!dst)
8119 			continue;
8120 		ath12k_wmi_pull_vdev_stats(src, dst);
8121 		stats->stats_id = WMI_REQUEST_VDEV_STAT;
8122 		list_add_tail(&dst->list, &stats->vdevs);
8123 	}
8124 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
8125 		const struct ath12k_wmi_bcn_stats_params *src;
8126 		struct ath12k_fw_stats_bcn *dst;
8127 
8128 		src = data;
8129 		if (len < sizeof(*src)) {
8130 			ret = -EPROTO;
8131 			goto exit;
8132 		}
8133 
8134 		data += sizeof(*src);
8135 		len -= sizeof(*src);
8136 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8137 		if (!dst)
8138 			continue;
8139 		ath12k_wmi_pull_bcn_stats(src, dst);
8140 		stats->stats_id = WMI_REQUEST_BCN_STAT;
8141 		list_add_tail(&dst->list, &stats->bcn);
8142 	}
8143 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
8144 		const struct ath12k_wmi_pdev_stats_params *src;
8145 		struct ath12k_fw_stats_pdev *dst;
8146 
8147 		src = data;
8148 		if (len < sizeof(*src)) {
8149 			ret = -EPROTO;
8150 			goto exit;
8151 		}
8152 
8153 		stats->stats_id = WMI_REQUEST_PDEV_STAT;
8154 
8155 		data += sizeof(*src);
8156 		len -= sizeof(*src);
8157 
8158 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8159 		if (!dst)
8160 			continue;
8161 
8162 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
8163 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
8164 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
8165 		list_add_tail(&dst->list, &stats->pdevs);
8166 	}
8167 
8168 exit:
8169 	rcu_read_unlock();
8170 	return ret;
8171 }
8172 
8173 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
8174 					 u16 tag, u16 len,
8175 					 const void *ptr, void *data)
8176 {
8177 	struct wmi_tlv_fw_stats_parse *parse = data;
8178 	int ret = 0;
8179 
8180 	switch (tag) {
8181 	case WMI_TAG_STATS_EVENT:
8182 		parse->ev = ptr;
8183 		break;
8184 	case WMI_TAG_ARRAY_BYTE:
8185 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
8186 		break;
8187 	default:
8188 		break;
8189 	}
8190 	return ret;
8191 }
8192 
8193 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
8194 				    struct ath12k_fw_stats *stats)
8195 {
8196 	struct wmi_tlv_fw_stats_parse parse = {};
8197 
8198 	stats->stats_id = 0;
8199 	parse.stats = stats;
8200 
8201 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8202 				   ath12k_wmi_tlv_fw_stats_parse,
8203 				   &parse);
8204 }
8205 
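/* Accumulate partially delivered stats. The firmware spreads
 * WMI_REQUEST_VDEV_STAT results for all started VDEVs (across all
 * PDEVs) over multiple events, so completion is signalled only once the
 * number of received events matches the total count of started VDEVs.
 * WMI_REQUEST_BCN_STAT is likewise counted against the started VDEVs of
 * this PDEV alone.
 */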
8206 static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
8207 					struct ath12k_fw_stats *stats)
8208 {
8209 	struct ath12k_base *ab = ar->ab;
8210 	struct ath12k_pdev *pdev;
8211 	bool is_end = true;
8212 	size_t total_vdevs_started = 0;
8213 	int i;
8214 
8215 	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
8216 		if (list_empty(&stats->vdevs)) {
8217 			ath12k_warn(ab, "empty vdev stats");
8218 			return;
8219 		}
8220 		/* FW sends stats for all active VDEVs irrespective of PDEV,
8221 		 * hence accumulate until stats for all started VDEVs are received
8222 		 */
8223 		rcu_read_lock();
8224 		for (i = 0; i < ab->num_radios; i++) {
8225 			pdev = rcu_dereference(ab->pdevs_active[i]);
8226 			if (pdev && pdev->ar)
8227 				total_vdevs_started += pdev->ar->num_started_vdevs;
8228 		}
8229 		rcu_read_unlock();
8230 
8231 		if (total_vdevs_started)
8232 			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
8233 				  total_vdevs_started);
8234 
8235 		list_splice_tail_init(&stats->vdevs,
8236 				      &ar->fw_stats.vdevs);
8237 
8238 		if (is_end)
8239 			complete(&ar->fw_stats_done);
8240 
8241 		return;
8242 	}
8243 
8244 	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
8245 		if (list_empty(&stats->bcn)) {
8246 			ath12k_warn(ab, "empty beacon stats");
8247 			return;
8248 		}
8249 		/* Mark the end once stats for all started VDEVs within
8250 		 * the PDEV have been received
8251 		 */
8252 		if (ar->num_started_vdevs)
8253 			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
8254 				  ar->num_started_vdevs);
8255 
8256 		list_splice_tail_init(&stats->bcn,
8257 				      &ar->fw_stats.bcn);
8258 
8259 		if (is_end)
8260 			complete(&ar->fw_stats_done);
8261 	}
8262 }
8263 
8264 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
8265 {
8266 	struct ath12k_fw_stats stats = {};
8267 	struct ath12k *ar;
8268 	int ret;
8269 
8270 	INIT_LIST_HEAD(&stats.pdevs);
8271 	INIT_LIST_HEAD(&stats.vdevs);
8272 	INIT_LIST_HEAD(&stats.bcn);
8273 
8274 	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
8275 	if (ret) {
8276 		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
8277 		goto free;
8278 	}
8279 
8280 	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
8281 
8282 	rcu_read_lock();
8283 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
8284 	if (!ar) {
8285 		rcu_read_unlock();
8286 		ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
8287 			    stats.pdev_id);
8288 		goto free;
8289 	}
8290 
8291 	spin_lock_bh(&ar->data_lock);
8292 
8293 	/* Handle WMI_REQUEST_PDEV_STAT status update */
8294 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
8295 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
8296 		complete(&ar->fw_stats_done);
8297 		goto complete;
8298 	}
8299 
8300 	/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
8301 	ath12k_wmi_fw_stats_process(ar, &stats);
8302 
8303 complete:
8304 	complete(&ar->fw_stats_complete);
8305 	spin_unlock_bh(&ar->data_lock);
8306 	rcu_read_unlock();
8307 
8308 	/* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised
8309 	 * at this point, there is no need to free the individual lists.
8310 	 */
8311 	return;
8312 
8313 free:
8314 	ath12k_fw_stats_free(&stats);
8315 }
8316 
8317 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
8318  * is not part of BDF CTL(Conformance test limits) table entries.
8319  */
8320 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8321 						 struct sk_buff *skb)
8322 {
8323 	const void **tb;
8324 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8325 	int ret;
8326 
8327 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8328 	if (IS_ERR(tb)) {
8329 		ret = PTR_ERR(tb);
8330 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8331 		return;
8332 	}
8333 
8334 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8335 	if (!ev) {
8336 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8337 		kfree(tb);
8338 		return;
8339 	}
8340 
8341 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8342 		   "pdev ctl failsafe check ev status %d\n",
8343 		   le32_to_cpu(ev->ctl_failsafe_status));
8344 
8345 	/* If ctl_failsafe_status is set to 1, the FW caps the transmit power
8346 	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
8347 	 */
8348 	if (ev->ctl_failsafe_status != 0)
8349 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
8350 			    le32_to_cpu(ev->ctl_failsafe_status));
8351 
8352 	kfree(tb);
8353 }
8354 
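/* Walk the vdev list of a CSA switch count event: once the countdown
 * reaches zero the channel switch is completed via
 * ieee80211_csa_finish(); otherwise mac80211's beacon countdown is kept
 * in sync, skipping stale events whose count does not match the
 * expected countdown value.
 */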
8355 static void
8356 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8357 					  const struct ath12k_wmi_pdev_csa_event *ev,
8358 					  const u32 *vdev_ids)
8359 {
8360 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8361 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8362 	struct ieee80211_bss_conf *conf;
8363 	struct ath12k_link_vif *arvif;
8364 	struct ath12k_vif *ahvif;
8365 	int i;
8366 
8367 	rcu_read_lock();
8368 	for (i = 0; i < num_vdevs; i++) {
8369 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8370 
8371 		if (!arvif) {
8372 			ath12k_warn(ab, "received CSA switch count status for unknown vdev %d",
8373 				    vdev_ids[i]);
8374 			continue;
8375 		}
8376 		ahvif = arvif->ahvif;
8377 
8378 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
8379 			ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
8380 				    arvif->link_id);
8381 			continue;
8382 		}
8383 
8384 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8385 		if (!conf) {
8386 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8387 				    ahvif->vif->addr, arvif->link_id);
8388 			continue;
8389 		}
8390 
8391 		if (!arvif->is_up || !conf->csa_active)
8392 			continue;
8393 
8394 		/* Finish CSA when counter reaches zero */
8395 		if (!current_switch_count) {
8396 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8397 			arvif->current_cntdown_counter = 0;
8398 		} else if (current_switch_count > 1) {
8399 			/* If the count in the event is not what we expect, don't
8400 			 * update the mac80211 count: during beacon Tx failure the
8401 			 * firmware does not decrement its count, so this event
8402 			 * carries the previous count value again.
8403 			 */
8404 			if (current_switch_count != arvif->current_cntdown_counter)
8405 				continue;
8406 
8407 			arvif->current_cntdown_counter =
8408 				ieee80211_beacon_update_cntdwn(ahvif->vif,
8409 							       arvif->link_id);
8410 		}
8411 	}
8412 	rcu_read_unlock();
8413 }
8414 
8415 static void
8416 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
8417 					      struct sk_buff *skb)
8418 {
8419 	const void **tb;
8420 	const struct ath12k_wmi_pdev_csa_event *ev;
8421 	const u32 *vdev_ids;
8422 	int ret;
8423 
8424 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8425 	if (IS_ERR(tb)) {
8426 		ret = PTR_ERR(tb);
8427 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8428 		return;
8429 	}
8430 
8431 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
8432 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
8433 
8434 	if (!ev || !vdev_ids) {
8435 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
8436 		kfree(tb);
8437 		return;
8438 	}
8439 
8440 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8441 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
8442 		   le32_to_cpu(ev->current_switch_count),
8443 		   le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->num_vdevs));
8444 
8445 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
8446 
8447 	kfree(tb);
8448 }
8449 
8450 static void
8451 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
8452 {
8453 	const void **tb;
8454 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
8455 	const struct ath12k_wmi_pdev_radar_event *ev;
8456 	struct ath12k *ar;
8457 	int ret;
8458 
8459 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8460 	if (IS_ERR(tb)) {
8461 		ret = PTR_ERR(tb);
8462 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8463 		return;
8464 	}
8465 
8466 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
8467 
8468 	if (!ev) {
8469 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
8470 		kfree(tb);
8471 		return;
8472 	}
8473 
8474 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8475 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
8476 		   le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->detection_mode), le32_to_cpu(ev->chan_freq), le32_to_cpu(ev->chan_width),
8477 		   le32_to_cpu(ev->detector_id), le32_to_cpu(ev->segment_id), le32_to_cpu(ev->timestamp), le32_to_cpu(ev->is_chirp),
8478 		   a_sle32_to_cpu(ev->freq_offset), a_sle32_to_cpu(ev->sidx));
8479 
8480 	rcu_read_lock();
8481 
8482 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
8483 
8484 	if (!ar) {
8485 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
8486 			    le32_to_cpu(ev->pdev_id));
8487 		goto exit;
8488 	}
8489 
8490 	arg.ar = ar;
8491 	arg.chanctx_conf = NULL;
8492 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
8493 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
8494 	if (!arg.chanctx_conf) {
8495 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
8496 		goto exit;
8497 	}
8498 
8499 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
8500 		   le32_to_cpu(ev->pdev_id));
8501 
8502 	if (ar->dfs_block_radar_events)
8503 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
8504 	else
8505 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
8506 
8507 exit:
8508 	rcu_read_unlock();
8509 
8510 	kfree(tb);
8511 }
8512 
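/* Segmented FTM (factory test mode) events carry their payload in a
 * WMI_TAG_ARRAY_BYTE TLV which is handed to the testmode layer; the
 * reported length excludes the TLV header preceding the data.
 */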
8513 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
8514 					  struct sk_buff *skb)
8515 {
8516 	const struct ath12k_wmi_ftm_event *ev;
8517 	const void **tb;
8518 	int ret;
8519 	u16 length;
8520 
8521 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8522 
8523 	if (IS_ERR(tb)) {
8524 		ret = PTR_ERR(tb);
8525 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
8526 		return;
8527 	}
8528 
8529 	ev = tb[WMI_TAG_ARRAY_BYTE];
8530 	if (!ev) {
8531 		ath12k_warn(ab, "failed to fetch ftm msg\n");
8532 		kfree(tb);
8533 		return;
8534 	}
8535 
8536 	length = skb->len - TLV_HDR_SIZE;
8537 	ath12k_tm_process_event(ab, cmd_id, ev, length);
8538 	kfree(tb);
8539 	tb = NULL;
8541 
8542 static void
8543 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
8544 				  struct sk_buff *skb)
8545 {
8546 	struct ath12k *ar;
8547 	struct wmi_pdev_temperature_event ev = {0};
8548 
8549 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
8550 		ath12k_warn(ab, "failed to extract pdev temperature event");
8551 		return;
8552 	}
8553 
8554 	ath12k_dbg(ab, ATH12K_DBG_WMI, "pdev temperature ev temp %d pdev_id %d\n",
8555 		   a_sle32_to_cpu(ev.temp), le32_to_cpu(ev.pdev_id));
8556 
8557 	rcu_read_lock();
8558 
8559 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
8560 	if (!ar) {
8561 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", le32_to_cpu(ev.pdev_id));
8562 		goto exit;
8563 	}
8564 
8565 exit:
8566 	rcu_read_unlock();
8567 }
8568 
8569 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
8570 					struct sk_buff *skb)
8571 {
8572 	const void **tb;
8573 	const struct wmi_fils_discovery_event *ev;
8574 	int ret;
8575 
8576 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8577 	if (IS_ERR(tb)) {
8578 		ret = PTR_ERR(tb);
8579 		ath12k_warn(ab,
8580 			    "failed to parse FILS discovery event tlv %d\n",
8581 			    ret);
8582 		return;
8583 	}
8584 
8585 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
8586 	if (!ev) {
8587 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
8588 		kfree(tb);
8589 		return;
8590 	}
8591 
8592 	ath12k_warn(ab,
8593 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
8594 		    le32_to_cpu(ev->vdev_id), le32_to_cpu(ev->fils_tt), le32_to_cpu(ev->tbtt));
8595 
8596 	kfree(tb);
8597 }
8598 
8599 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
8600 					      struct sk_buff *skb)
8601 {
8602 	const void **tb;
8603 	const struct wmi_probe_resp_tx_status_event *ev;
8604 	int ret;
8605 
8606 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8607 	if (IS_ERR(tb)) {
8608 		ret = PTR_ERR(tb);
8609 		ath12k_warn(ab,
8610 			    "failed to parse probe response transmission status event tlv: %d\n",
8611 			    ret);
8612 		return;
8613 	}
8614 
8615 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
8616 	if (!ev) {
8617 		ath12k_warn(ab,
8618 			    "failed to fetch probe response transmission status event");
8619 		kfree(tb);
8620 		return;
8621 	}
8622 
8623 	if (ev->tx_status)
8624 		ath12k_warn(ab,
8625 			    "Probe response transmission failed for vdev_id %u, status %u\n",
8626 			    le32_to_cpu(ev->vdev_id), le32_to_cpu(ev->tx_status));
8627 
8628 	kfree(tb);
8629 }
8630 
8631 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
8632 				    struct sk_buff *skb)
8633 {
8634 	const void **tb;
8635 	const struct wmi_p2p_noa_event *ev;
8636 	const struct ath12k_wmi_p2p_noa_info *noa;
8637 	struct ath12k *ar;
8638 	int ret, vdev_id;
8639 
8640 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8641 	if (IS_ERR(tb)) {
8642 		ret = PTR_ERR(tb);
8643 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
8644 		return ret;
8645 	}
8646 
8647 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
8648 	noa = tb[WMI_TAG_P2P_NOA_INFO];
8649 
8650 	if (!ev || !noa) {
8651 		ret = -EPROTO;
8652 		goto out;
8653 	}
8654 
8655 	vdev_id = __le32_to_cpu(ev->vdev_id);
8656 
8657 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8658 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
8659 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
8660 
8661 	rcu_read_lock();
8662 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
8663 	if (!ar) {
8664 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
8665 			    vdev_id);
8666 		ret = -EINVAL;
8667 		goto unlock;
8668 	}
8669 
8670 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
8671 
8672 	ret = 0;
8673 
8674 unlock:
8675 	rcu_read_unlock();
8676 out:
8677 	kfree(tb);
8678 	return ret;
8679 }
8680 
8681 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
8682 					     struct sk_buff *skb)
8683 {
8684 	const struct wmi_rfkill_state_change_event *ev;
8685 	const void **tb;
8686 	int ret;
8687 
8688 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8689 	if (IS_ERR(tb)) {
8690 		ret = PTR_ERR(tb);
8691 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8692 		return;
8693 	}
8694 
8695 	ev = tb[WMI_TAG_RFKILL_EVENT];
8696 	if (!ev) {
8697 		kfree(tb);
8698 		return;
8699 	}
8700 
8701 	ath12k_dbg(ab, ATH12K_DBG_MAC,
8702 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
8703 		   le32_to_cpu(ev->gpio_pin_num),
8704 		   le32_to_cpu(ev->int_type),
8705 		   le32_to_cpu(ev->radio_state));
8706 
8707 	spin_lock_bh(&ab->base_lock);
8708 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
8709 	spin_unlock_bh(&ab->base_lock);
8710 
8711 	queue_work(ab->workqueue, &ab->rfkill_work);
8712 	kfree(tb);
8713 }
8714 
8715 static void
8716 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
8717 {
8718 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
8719 }
8720 
8721 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
8722 					struct sk_buff *skb)
8723 {
8724 	const void **tb;
8725 	const struct wmi_twt_enable_event *ev;
8726 	int ret;
8727 
8728 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8729 	if (IS_ERR(tb)) {
8730 		ret = PTR_ERR(tb);
8731 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
8732 			    ret);
8733 		return;
8734 	}
8735 
8736 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
8737 	if (!ev) {
8738 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
8739 		goto exit;
8740 	}
8741 
8742 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
8743 		   le32_to_cpu(ev->pdev_id),
8744 		   le32_to_cpu(ev->status));
8745 
8746 exit:
8747 	kfree(tb);
8748 }
8749 
8750 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
8751 					 struct sk_buff *skb)
8752 {
8753 	const void **tb;
8754 	const struct wmi_twt_disable_event *ev;
8755 	int ret;
8756 
8757 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8758 	if (IS_ERR(tb)) {
8759 		ret = PTR_ERR(tb);
8760 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
8761 			    ret);
8762 		return;
8763 	}
8764 
8765 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
8766 	if (!ev) {
8767 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
8768 		goto exit;
8769 	}
8770 
8771 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
8772 		   le32_to_cpu(ev->pdev_id),
8773 		   le32_to_cpu(ev->status));
8774 
8775 exit:
8776 	kfree(tb);
8777 }
8778 
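/* TLV iterator callback for the WOW wakeup host event: records the wake
 * reason from WMI_TAG_WOW_EVENT_INFO and, when the reason is a page
 * fault, validates and dumps the page fault packet carried in the
 * trailing WMI_TAG_ARRAY_BYTE TLV.
 */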
8779 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
8780 					    u16 tag, u16 len,
8781 					    const void *ptr, void *data)
8782 {
8783 	const struct wmi_wow_ev_pg_fault_param *pf_param;
8784 	const struct wmi_wow_ev_param *param;
8785 	struct wmi_wow_ev_arg *arg = data;
8786 	int pf_len;
8787 
8788 	switch (tag) {
8789 	case WMI_TAG_WOW_EVENT_INFO:
8790 		param = ptr;
8791 		arg->wake_reason = le32_to_cpu(param->wake_reason);
8792 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
8793 			   arg->wake_reason, wow_reason(arg->wake_reason));
8794 		break;
8795 
8796 	case WMI_TAG_ARRAY_BYTE:
8797 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
8798 			pf_param = ptr;
8799 			pf_len = le32_to_cpu(pf_param->len);
8800 			if (pf_len > len - sizeof(pf_len) ||
8801 			    pf_len < 0) {
8802 				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
8803 					    pf_len);
8804 				return -EINVAL;
8805 			}
8806 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
8807 				   pf_len);
8808 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
8809 					"wow_reason_page_fault packet present",
8810 					"wow_pg_fault ",
8811 					pf_param->data,
8812 					pf_len);
8813 		}
8814 		break;
8815 	default:
8816 		break;
8817 	}
8818 
8819 	return 0;
8820 }
8821 
8822 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
8823 {
8824 	struct wmi_wow_ev_arg arg = { };
8825 	int ret;
8826 
8827 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8828 				  ath12k_wmi_wow_wakeup_host_parse,
8829 				  &arg);
8830 	if (ret) {
8831 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8832 			    ret);
8833 		return;
8834 	}
8835 
8836 	complete(&ab->wow.wakeup_completed);
8837 }
8838 
8839 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8840 						struct sk_buff *skb)
8841 {
8842 	const struct wmi_gtk_offload_status_event *ev;
8843 	struct ath12k_link_vif *arvif;
8844 	__be64 replay_ctr_be;
8845 	u64 replay_ctr;
8846 	const void **tb;
8847 	int ret;
8848 
8849 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8850 	if (IS_ERR(tb)) {
8851 		ret = PTR_ERR(tb);
8852 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8853 		return;
8854 	}
8855 
8856 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8857 	if (!ev) {
8858 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8859 		kfree(tb);
8860 		return;
8861 	}
8862 
8863 	rcu_read_lock();
8864 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8865 	if (!arvif) {
8866 		rcu_read_unlock();
8867 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8868 			    le32_to_cpu(ev->vdev_id));
8869 		kfree(tb);
8870 		return;
8871 	}
8872 
8873 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8874 	arvif->rekey_data.replay_ctr = replay_ctr;
8875 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8876 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8877 
8878 	/* supplicant expects big-endian replay counter */
8879 	replay_ctr_be = cpu_to_be64(replay_ctr);
8880 
8881 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8882 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8883 
8884 	rcu_read_unlock();
8885 
8886 	kfree(tb);
8887 }
8888 
8889 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8890 						struct sk_buff *skb)
8891 {
8892 	const struct wmi_mlo_setup_complete_event *ev;
8893 	struct ath12k *ar = NULL;
8894 	struct ath12k_pdev *pdev;
8895 	const void **tb;
8896 	int ret, i;
8897 
8898 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8899 	if (IS_ERR(tb)) {
8900 		ret = PTR_ERR(tb);
8901 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8902 			    ret);
8903 		return;
8904 	}
8905 
8906 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8907 	if (!ev) {
8908 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8909 		kfree(tb);
8910 		return;
8911 	}
8912 
8913 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8914 		goto skip_lookup;
8915 
8916 	for (i = 0; i < ab->num_radios; i++) {
8917 		pdev = &ab->pdevs[i];
8918 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8919 			ar = pdev->ar;
8920 			break;
8921 		}
8922 	}
8923 
8924 skip_lookup:
8925 	if (!ar) {
8926 		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
8927 			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
8928 		goto out;
8929 	}
8930 
8931 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8932 	complete(&ar->mlo_setup_done);
8933 
8934 out:
8935 	kfree(tb);
8936 }
8937 
8938 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8939 					       struct sk_buff *skb)
8940 {
8941 	const struct wmi_mlo_teardown_complete_event *ev;
8942 	const void **tb;
8943 	int ret;
8944 
8945 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8946 	if (IS_ERR(tb)) {
8947 		ret = PTR_ERR(tb);
8948 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8949 		return;
8950 	}
8951 
8952 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8953 	if (!ev) {
8954 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8955 		kfree(tb);
8956 		return;
8957 	}
8958 
8959 	kfree(tb);
8960 }
8961 
8962 #ifdef CONFIG_ATH12K_DEBUGFS
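/* Copy one raw stats array (a WMI_TAG_ARRAY_INT16 or WMI_TAG_ARRAY_BYTE
 * payload) into the buffer that was allocated while parsing the
 * matching fixed-param sub-TLV; event_count selects which of the four
 * arrays this TLV belongs to and tlvs_rcvd guards against copying into
 * a buffer that was never set up.
 */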
8963 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8964 					    const void *ptr, u16 tag, u16 len,
8965 					    struct wmi_tpc_stats_arg *tpc_stats)
8966 {
8967 	u32 len1, len2, len3, len4;
8968 	s16 *dst_ptr;
8969 	s8 *dst_ptr_ctl;
8970 
8971 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8972 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8973 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8974 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8975 
8976 	switch (tpc_stats->event_count) {
8977 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8978 		if (len1 > len)
8979 			return -ENOBUFS;
8980 
8981 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8982 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8983 			memcpy(dst_ptr, ptr, len1);
8984 		}
8985 		break;
8986 	case ATH12K_TPC_STATS_RATES_EVENT1:
8987 		if (len2 > len)
8988 			return -ENOBUFS;
8989 
8990 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8991 			dst_ptr = tpc_stats->rates_array1.rate_array;
8992 			memcpy(dst_ptr, ptr, len2);
8993 		}
8994 		break;
8995 	case ATH12K_TPC_STATS_RATES_EVENT2:
8996 		if (len3 > len)
8997 			return -ENOBUFS;
8998 
8999 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
9000 			dst_ptr = tpc_stats->rates_array2.rate_array;
9001 			memcpy(dst_ptr, ptr, len3);
9002 		}
9003 		break;
9004 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
9005 		if (len4 > len)
9006 			return -ENOBUFS;
9007 
9008 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
9009 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
9010 			memcpy(dst_ptr_ctl, ptr, len4);
9011 		}
9012 		break;
9013 	}
9014 	return 0;
9015 }
9016 
9017 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
9018 				  struct wmi_tpc_stats_arg *tpc_stats,
9019 				  struct wmi_max_reg_power_fixed_params *ev)
9020 {
9021 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
9022 	u32 total_size;
9023 
9024 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9025 		   "Received reg power array type %d length %d for tpc stats\n",
9026 		   le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
9027 
9028 	switch (le32_to_cpu(ev->reg_power_type)) {
9029 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
9030 		reg_pwr = &tpc_stats->max_reg_allowed_power;
9031 		break;
9032 	default:
9033 		return -EINVAL;
9034 	}
9035 
9036 	/* Each entry is 2 bytes wide, hence multiply the dimension product by 2 */
9037 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9038 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
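	/* For example, hypothetical dimensions d1=2, d2=2, d3=5, d4=1
	 * give total_size = 2 * 2 * 5 * 1 * 2 = 40 bytes.
	 */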
9039 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
9040 		ath12k_warn(ab,
9041 			    "Total size and reg_array_len don't match for tpc stats\n");
9042 		return -EINVAL;
9043 	}
9044 
9045 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
9046 
9047 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
9048 					 GFP_ATOMIC);
9049 	if (!reg_pwr->reg_pwr_array)
9050 		return -ENOMEM;
9051 
9052 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
9053 
9054 	return 0;
9055 }
9056 
9057 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
9058 				     struct wmi_tpc_stats_arg *tpc_stats,
9059 				     struct wmi_tpc_rates_array_fixed_params *ev)
9060 {
9061 	struct wmi_tpc_rates_array_arg *rates_array;
9062 	u32 flag = 0, rate_array_len;
9063 
9064 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9065 		   "Received rates array type %d length %d for tpc stats\n",
9066 		   le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
9067 
9068 	switch (le32_to_cpu(ev->rate_array_type)) {
9069 	case ATH12K_TPC_STATS_RATES_ARRAY1:
9070 		rates_array = &tpc_stats->rates_array1;
9071 		flag = WMI_TPC_RATES_ARRAY1;
9072 		break;
9073 	case ATH12K_TPC_STATS_RATES_ARRAY2:
9074 		rates_array = &tpc_stats->rates_array2;
9075 		flag = WMI_TPC_RATES_ARRAY2;
9076 		break;
9077 	default:
9078 		ath12k_warn(ab,
9079 			    "Received invalid type of rates array for tpc stats\n");
9080 		return -EINVAL;
9081 	}
9082 	memcpy(&rates_array->tpc_rates_array, ev,
9083 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
9084 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
9085 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
9086 	if (!rates_array->rate_array)
9087 		return -ENOMEM;
9088 
9089 	tpc_stats->tlvs_rcvd |= flag;
9090 	return 0;
9091 }
9092 
9093 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
9094 				      struct wmi_tpc_stats_arg *tpc_stats,
9095 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
9096 {
9097 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
9098 	u32 total_size, ctl_array_len, flag = 0;
9099 
9100 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9101 		   "Received ctl array type %d length %d for tpc stats\n",
9102 		   le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
9103 
9104 	switch (le32_to_cpu(ev->ctl_array_type)) {
9105 	case ATH12K_TPC_STATS_CTL_ARRAY:
9106 		ctl_array = &tpc_stats->ctl_array;
9107 		flag = WMI_TPC_CTL_PWR_ARRAY;
9108 		break;
9109 	default:
9110 		ath12k_warn(ab,
9111 			    "Received invalid type of ctl pwr table for tpc stats\n");
9112 		return -EINVAL;
9113 	}
9114 
9115 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9116 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
9117 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
9118 		ath12k_warn(ab,
9119 			    "Total size and ctl_array_len don't match for tpc stats\n");
9120 		return -EINVAL;
9121 	}
9122 
9123 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
9124 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
9125 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
9126 	if (!ctl_array->ctl_pwr_table)
9127 		return -ENOMEM;
9128 
9129 	tpc_stats->tlvs_rcvd |= flag;
9130 	return 0;
9131 }
9132 
9133 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
9134 					      u16 tag, u16 len,
9135 					      const void *ptr, void *data)
9136 {
9137 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
9138 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
9139 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
9140 	struct wmi_tpc_stats_arg *tpc_stats = data;
9141 	struct wmi_tpc_config_params *tpc_config;
9142 	int ret = 0;
9143 
9144 	if (!tpc_stats) {
9145 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9146 		return -EINVAL;
9147 	}
9148 
9149 	switch (tag) {
9150 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
9151 		tpc_config = (struct wmi_tpc_config_params *)ptr;
9152 		memcpy(&tpc_stats->tpc_config, tpc_config,
9153 		       sizeof(struct wmi_tpc_config_params));
9154 		break;
9155 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
9156 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
9157 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
9158 		break;
9159 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
9160 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
9161 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
9162 		break;
9163 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
9164 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
9165 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
9166 		break;
9167 	default:
9168 		ath12k_warn(ab,
9169 			    "Received invalid tag for tpc stats in subtlvs\n");
9170 		return -EINVAL;
9171 	}
9172 	return ret;
9173 }
9174 
9175 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
9176 					     u16 tag, u16 len,
9177 					     const void *ptr, void *data)
9178 {
9179 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
9180 	int ret;
9181 
9182 	switch (tag) {
9183 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
9184 		ret = 0;
9185 		/* Fixed param is already processed */
9186 		break;
9187 	case WMI_TAG_ARRAY_STRUCT:
9188 		/* len 0 is expected for array of struct when there
9189 		 * is no content of that type to pack inside that tlv
9190 		 */
9191 		if (len == 0)
9192 			return 0;
9193 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9194 					  ath12k_wmi_tpc_stats_subtlv_parser,
9195 					  tpc_stats);
9196 		break;
9197 	case WMI_TAG_ARRAY_INT16:
9198 		if (len == 0)
9199 			return 0;
9200 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9201 						       WMI_TAG_ARRAY_INT16,
9202 						       len, tpc_stats);
9203 		break;
9204 	case WMI_TAG_ARRAY_BYTE:
9205 		if (len == 0)
9206 			return 0;
9207 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9208 						       WMI_TAG_ARRAY_BYTE,
9209 						       len, tpc_stats);
9210 		break;
9211 	default:
9212 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
9213 		ret = -EINVAL;
9214 		break;
9215 	}
9216 	return ret;
9217 }
9218 
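/* Free the TPC stats arrays and the container itself and clear the
 * debugfs pointer. The caller must hold ar->data_lock, asserted via
 * lockdep below.
 */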
9219 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
9220 {
9221 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
9222 
9223 	lockdep_assert_held(&ar->data_lock);
9224 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
9225 	if (tpc_stats) {
9226 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
9227 		kfree(tpc_stats->rates_array1.rate_array);
9228 		kfree(tpc_stats->rates_array2.rate_array);
9229 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
9230 		kfree(tpc_stats);
9231 		ar->debug.tpc_stats = NULL;
9232 	}
9233 }
9234 
9235 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9236 					 struct sk_buff *skb)
9237 {
9238 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
9239 	struct wmi_tpc_stats_arg *tpc_stats;
9240 	const struct wmi_tlv *tlv;
9241 	void *ptr = skb->data;
9242 	struct ath12k *ar;
9243 	u16 tlv_tag;
9244 	u32 event_count;
9245 	int ret;
9246 
9247 	if (!skb->data) {
9248 		ath12k_warn(ab, "No data present in tpc stats event\n");
9249 		return;
9250 	}
9251 
9252 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9253 		ath12k_warn(ab, "TPC stats event size invalid\n");
9254 		return;
9255 	}
9256 
9257 	tlv = (struct wmi_tlv *)ptr;
9258 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9259 	ptr += sizeof(*tlv);
9260 
9261 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
9262 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
9263 		return;
9264 	}
9265 
9266 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
9267 	rcu_read_lock();
9268 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
9269 	if (!ar) {
9270 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
9271 		rcu_read_unlock();
9272 		return;
9273 	}
9274 	spin_lock_bh(&ar->data_lock);
9275 	if (!ar->debug.tpc_request) {
9276 		/* Event received either without a request or after the
9277 		 * timeout; if memory is already allocated, free it
9278 		 */
9279 		if (ar->debug.tpc_stats) {
9280 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
9281 			ath12k_wmi_free_tpc_stats_mem(ar);
9282 		}
9283 		goto unlock;
9284 	}
9285 
9286 	event_count = le32_to_cpu(fixed_param->event_count);
9287 	if (event_count == 0) {
9288 		if (ar->debug.tpc_stats) {
9289 			ath12k_warn(ab,
9290 				    "stale tpc stats memory present, dropping event\n");
9291 			goto unlock;
9292 		}
9293 		ar->debug.tpc_stats =
9294 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
9295 				GFP_ATOMIC);
9296 		if (!ar->debug.tpc_stats) {
9297 			ath12k_warn(ab,
9298 				    "Failed to allocate memory for tpc stats\n");
9299 			goto unlock;
9300 		}
9301 	}
9302 
9303 	tpc_stats = ar->debug.tpc_stats;
9304 	if (!tpc_stats) {
9305 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9306 		goto unlock;
9307 	}
9308 
9309 	if (event_count != 0 &&
9310 	    event_count != tpc_stats->event_count + 1) {
9311 		ath12k_warn(ab,
9312 			    "Invalid tpc event received\n");
9313 		goto unlock;
9314 	}
9316 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
9317 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
9318 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
9319 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9320 		   "tpc stats event_count %d\n",
9321 		   tpc_stats->event_count);
9322 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9323 				  ath12k_wmi_tpc_stats_event_parser,
9324 				  tpc_stats);
9325 	if (ret) {
9326 		ath12k_wmi_free_tpc_stats_mem(ar);
9327 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
9328 		goto unlock;
9329 	}
9330 
9331 	if (tpc_stats->end_of_event)
9332 		complete(&ar->debug.tpc_complete);
9333 
9334 unlock:
9335 	spin_unlock_bh(&ar->data_lock);
9336 	rcu_read_unlock();
9337 }
9338 #else
9339 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9340 					 struct sk_buff *skb)
9341 {
9342 }
9343 #endif
9344 
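/* Sub-TLV parser for the RSSI dBm conversion event: derives the minimum
 * noise floor across all active chains and occupied 20 MHz subbands
 * from the PARAMS_INFO sub-TLV and picks up the temperature based
 * offset from the TEMP_OFFSET_INFO sub-TLV.
 */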
9345 static int
9346 ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
9347 						u16 tag, u16 len,
9348 						const void *ptr, void *data)
9349 {
9350 	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
9351 	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
9352 	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
9353 	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
9354 	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
9355 	u8 num_20mhz_segments;
9356 	s8 min_nf, *nf_ptr;
9357 	int i, j;
9358 
9359 	switch (tag) {
9360 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
9361 		if (len < sizeof(*param_info)) {
9362 			ath12k_warn(ab,
9363 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9364 				    tag, len);
9365 			return -EINVAL;
9366 		}
9367 
9368 		param_info = ptr;
9369 
9370 		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
9371 		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);
9372 
9373 		/* The received array packs the per chain, per 20 MHz subband
9374 		 * noise floor bytes into 32-bit words; unpack into a 2D byte-array.
9375 		 */
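		/* For example, a hypothetical word 0xf8f9fafb unpacks LSB first
		 * into the bytes 0xfb, 0xfa, 0xf9, 0xf8, i.e. noise floors of
		 * -5, -6, -7 and -8 dBm for four consecutive 20 MHz subbands.
		 */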
9376 		nf_ptr = &param_arg.nf_hw_dbm[0][0];
9377 
9378 		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
9379 			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);
9380 
9381 			for (j = 0; j < 4; j++) {
9382 				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
9383 				nf_ptr++;
9384 			}
9385 		}
9386 
9387 		switch (param_arg.curr_bw) {
9388 		case WMI_CHAN_WIDTH_20:
9389 			num_20mhz_segments = 1;
9390 			break;
9391 		case WMI_CHAN_WIDTH_40:
9392 			num_20mhz_segments = 2;
9393 			break;
9394 		case WMI_CHAN_WIDTH_80:
9395 			num_20mhz_segments = 4;
9396 			break;
9397 		case WMI_CHAN_WIDTH_160:
9398 			num_20mhz_segments = 8;
9399 			break;
9400 		case WMI_CHAN_WIDTH_320:
9401 			num_20mhz_segments = 16;
9402 			break;
9403 		default:
9404 			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
9405 				    param_arg.curr_bw);
9406 			/* In the error case, still consider the primary 20 MHz
9407 			 * segment since that is much better than dropping the
9408 			 * whole event
9409 			 */
9410 			num_20mhz_segments = 1;
9411 		}
9412 
9413 		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;
9414 
9415 		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
9416 			if (!(param_arg.curr_rx_chainmask & BIT(i)))
9417 				continue;
9418 
9419 			for (j = 0; j < num_20mhz_segments; j++) {
9420 				if (param_arg.nf_hw_dbm[i][j] < min_nf)
9421 					min_nf = param_arg.nf_hw_dbm[i][j];
9422 			}
9423 		}
9424 
9425 		rssi_info->min_nf_dbm = min_nf;
9426 		rssi_info->nf_dbm_present = true;
9427 		break;
9428 	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
9429 		if (len < sizeof(*temp_info)) {
9430 			ath12k_warn(ab,
9431 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9432 				    tag, len);
9433 			return -EINVAL;
9434 		}
9435 
9436 		temp_info = ptr;
9437 		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
9438 		rssi_info->temp_offset_present = true;
9439 		break;
9440 	default:
9441 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9442 			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
9443 	}
9444 
9445 	return 0;
9446 }
9447 
9448 static int
9449 ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
9450 					   u16 tag, u16 len,
9451 					   const void *ptr, void *data)
9452 {
9453 	int ret = 0;
9454 
9455 	switch (tag) {
9456 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
9457 		/* Fixed param is already processed */
9458 		break;
9459 	case WMI_TAG_ARRAY_STRUCT:
9460 		/* len 0 is expected for array of struct when there
9461 		 * is no content of that type inside that tlv
9462 		 */
9463 		if (len == 0)
9464 			return 0;
9465 
9466 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9467 					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
9468 					  data);
9469 		break;
9470 	default:
9471 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9472 			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
9473 			   tag);
9474 		break;
9475 	}
9476 
9477 	return ret;
9478 }
9479 
9480 static int
9481 ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
9482 						  size_t len, u32 *pdev_id)
9483 {
9484 	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
9485 	const struct wmi_tlv *tlv;
9486 	u16 tlv_tag;
9487 
9488 	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9489 		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
9490 		return -EINVAL;
9491 	}
9492 
9493 	tlv = (struct wmi_tlv *)ptr;
9494 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9495 	ptr += sizeof(*tlv);
9496 
9497 	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
9498 		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
9499 		return -EINVAL;
9500 	}
9501 
9502 	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
9503 	*pdev_id = le32_to_cpu(fixed_param->pdev_id);
9504 
9505 	return 0;
9506 }
9507 
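/* Fold freshly parsed conversion parameters into the pdev RSSI state:
 * the effective noise floor used for RSSI to dBm conversion is
 * min_nf_dbm + temp_offset, recomputed whenever either component is
 * updated by the event.
 */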
9508 static void
9509 ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
9510 			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
9511 {
9512 	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;
9513 
9514 	lockdep_assert_held(&ar->data_lock);
9515 
9516 	if (rssi_info->temp_offset_present)
9517 		info->temp_offset = rssi_info->temp_offset;
9518 
9519 	if (rssi_info->nf_dbm_present)
9520 		info->min_nf_dbm = rssi_info->min_nf_dbm;
9521 
9522 	info->noise_floor = info->min_nf_dbm + info->temp_offset;
9523 }
9524 
9525 static void
9526 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
9527 						 struct sk_buff *skb)
9528 {
9529 	struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info = {};
9530 	struct ath12k *ar;
9531 	s32 noise_floor;
9532 	u32 pdev_id;
9533 	int ret;
9534 
9535 	ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
9536 								&pdev_id);
9537 	if (ret) {
9538 		ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
9539 			    ret);
9540 		return;
9541 	}
9542 
9543 	rcu_read_lock();
9544 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
9545 	/* If pdev is not active, ignore the event */
9546 	if (!ar)
9547 		goto out_unlock;
9548 
9549 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9550 				  ath12k_wmi_rssi_dbm_conv_info_event_parser,
9551 				  &rssi_info);
9552 	if (ret) {
9553 		ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
9554 		goto out_unlock;
9555 	}
9556 
9557 	spin_lock_bh(&ar->data_lock);
9558 	ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
9559 	noise_floor = ath12k_pdev_get_noise_floor(ar);
9560 	spin_unlock_bh(&ar->data_lock);
9561 
9562 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9563 		   "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
9564 out_unlock:
9565 	rcu_read_unlock();
9566 }
9567 
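/* Main WMI event dispatcher: strip the command header and fan out on
 * the event id. The skb is freed here after the handler returns, except
 * for WMI_MGMT_RX_EVENTID whose handler takes over skb ownership.
 */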
9568 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
9569 {
9570 	struct wmi_cmd_hdr *cmd_hdr;
9571 	enum wmi_tlv_event_id id;
9572 
9573 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
9574 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
9575 
9576 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
9577 		goto out;
9578 
9579 	switch (id) {
9580 		/* Process all the WMI events here */
9581 	case WMI_SERVICE_READY_EVENTID:
9582 		ath12k_service_ready_event(ab, skb);
9583 		break;
9584 	case WMI_SERVICE_READY_EXT_EVENTID:
9585 		ath12k_service_ready_ext_event(ab, skb);
9586 		break;
9587 	case WMI_SERVICE_READY_EXT2_EVENTID:
9588 		ath12k_service_ready_ext2_event(ab, skb);
9589 		break;
9590 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
9591 		ath12k_reg_chan_list_event(ab, skb);
9592 		break;
9593 	case WMI_READY_EVENTID:
9594 		ath12k_ready_event(ab, skb);
9595 		break;
9596 	case WMI_PEER_DELETE_RESP_EVENTID:
9597 		ath12k_peer_delete_resp_event(ab, skb);
9598 		break;
9599 	case WMI_VDEV_START_RESP_EVENTID:
9600 		ath12k_vdev_start_resp_event(ab, skb);
9601 		break;
9602 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
9603 		ath12k_bcn_tx_status_event(ab, skb);
9604 		break;
9605 	case WMI_VDEV_STOPPED_EVENTID:
9606 		ath12k_vdev_stopped_event(ab, skb);
9607 		break;
9608 	case WMI_MGMT_RX_EVENTID:
9609 		ath12k_mgmt_rx_event(ab, skb);
9610 		/* mgmt_rx_event() owns the skb now! */
9611 		return;
9612 	case WMI_MGMT_TX_COMPLETION_EVENTID:
9613 		ath12k_mgmt_tx_compl_event(ab, skb);
9614 		break;
9615 	case WMI_SCAN_EVENTID:
9616 		ath12k_scan_event(ab, skb);
9617 		break;
9618 	case WMI_PEER_STA_KICKOUT_EVENTID:
9619 		ath12k_peer_sta_kickout_event(ab, skb);
9620 		break;
9621 	case WMI_ROAM_EVENTID:
9622 		ath12k_roam_event(ab, skb);
9623 		break;
9624 	case WMI_CHAN_INFO_EVENTID:
9625 		ath12k_chan_info_event(ab, skb);
9626 		break;
9627 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
9628 		ath12k_pdev_bss_chan_info_event(ab, skb);
9629 		break;
9630 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
9631 		ath12k_vdev_install_key_compl_event(ab, skb);
9632 		break;
9633 	case WMI_SERVICE_AVAILABLE_EVENTID:
9634 		ath12k_service_available_event(ab, skb);
9635 		break;
9636 	case WMI_PEER_ASSOC_CONF_EVENTID:
9637 		ath12k_peer_assoc_conf_event(ab, skb);
9638 		break;
9639 	case WMI_UPDATE_STATS_EVENTID:
9640 		ath12k_update_stats_event(ab, skb);
9641 		break;
9642 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
9643 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
9644 		break;
9645 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
9646 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
9647 		break;
9648 	case WMI_PDEV_TEMPERATURE_EVENTID:
9649 		ath12k_wmi_pdev_temperature_event(ab, skb);
9650 		break;
9651 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
9652 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
9653 		break;
9654 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
9655 		ath12k_fils_discovery_event(ab, skb);
9656 		break;
9657 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
9658 		ath12k_probe_resp_tx_status_event(ab, skb);
9659 		break;
9660 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
9661 		ath12k_rfkill_state_change_event(ab, skb);
9662 		break;
9663 	case WMI_TWT_ENABLE_EVENTID:
9664 		ath12k_wmi_twt_enable_event(ab, skb);
9665 		break;
9666 	case WMI_TWT_DISABLE_EVENTID:
9667 		ath12k_wmi_twt_disable_event(ab, skb);
9668 		break;
9669 	case WMI_P2P_NOA_EVENTID:
9670 		ath12k_wmi_p2p_noa_event(ab, skb);
9671 		break;
9672 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
9673 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
9674 		break;
9675 	case WMI_VDEV_DELETE_RESP_EVENTID:
9676 		ath12k_vdev_delete_resp_event(ab, skb);
9677 		break;
9678 	case WMI_DIAG_EVENTID:
9679 		ath12k_wmi_diag_event(ab, skb);
9680 		break;
9681 	case WMI_WOW_WAKEUP_HOST_EVENTID:
9682 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
9683 		break;
9684 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
9685 		ath12k_wmi_gtk_offload_status_event(ab, skb);
9686 		break;
9687 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
9688 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
9689 		break;
9690 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
9691 		ath12k_wmi_event_teardown_complete(ab, skb);
9692 		break;
9693 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
9694 		ath12k_wmi_process_tpc_stats(ab, skb);
9695 		break;
9696 	case WMI_11D_NEW_COUNTRY_EVENTID:
9697 		ath12k_reg_11d_new_cc_event(ab, skb);
9698 		break;
9699 	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
9700 		ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
9701 		break;
9702 	/* add Unsupported events (rare) here */
9703 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
9704 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
9705 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
9706 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9707 			   "ignoring unsupported event 0x%x\n", id);
9708 		break;
9709 	/* add Unsupported events (frequent) here */
9710 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
9711 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
9712 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
9713 		/* debug might flood hence silently ignore (no-op) */
9714 		break;
9715 	case WMI_PDEV_UTF_EVENTID:
9716 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
9717 			ath12k_tm_wmi_event_segmented(ab, id, skb);
9718 		else
9719 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
9720 		break;
9721 	default:
9722 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
9723 		break;
9724 	}
9725 
9726 out:
9727 	dev_kfree_skb(skb);
9728 }
9729 
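/* Connect the WMI control service for one pdev over HTC. The service id
 * is selected by pdev index (MAC0/MAC1/MAC2) and the resulting endpoint
 * id and maximum message length are cached for later command
 * submission.
 */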
9730 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
9731 					   u32 pdev_idx)
9732 {
9733 	int status;
9734 	static const u32 svc_id[] = {
9735 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
9736 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
9737 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
9738 	};
9739 	struct ath12k_htc_svc_conn_req conn_req = {};
9740 	struct ath12k_htc_svc_conn_resp conn_resp = {};
9741 
9742 	/* these fields are the same for all service endpoints */
9743 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
9744 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
9745 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
9746 
9747 	/* connect to control service */
9748 	conn_req.service_id = svc_id[pdev_idx];
9749 
9750 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
9751 	if (status) {
9752 		ath12k_warn(ab, "failed to connect to WMI CONTROL service: %d\n",
9753 			    status);
9754 		return status;
9755 	}
9756 
9757 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
9758 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
9759 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
9760 
9761 	return 0;
9762 }
9763 
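/* Build and send WMI_UNIT_TEST_CMDID: a fixed-param TLV carrying the
 * module/vdev/token fields followed by a WMI_TAG_ARRAY_UINT32 TLV with
 * num_args test arguments. ath12k_wmi_simulate_radar() below uses this
 * to inject DFS unit test arguments.
 */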
9764 static int
9765 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
9766 			      struct wmi_unit_test_cmd ut_cmd,
9767 			      u32 *test_args)
9768 {
9769 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9770 	struct wmi_unit_test_cmd *cmd;
9771 	struct sk_buff *skb;
9772 	struct wmi_tlv *tlv;
9773 	void *ptr;
9774 	u32 *ut_cmd_args;
9775 	int buf_len, arg_len;
9776 	int ret;
9777 	int i;
9778 
9779 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
9780 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
9781 
9782 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9783 	if (!skb)
9784 		return -ENOMEM;
9785 
9786 	cmd = (struct wmi_unit_test_cmd *)skb->data;
9787 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
9788 						 sizeof(ut_cmd));
9789 
9790 	cmd->vdev_id = ut_cmd.vdev_id;
9791 	cmd->module_id = ut_cmd.module_id;
9792 	cmd->num_args = ut_cmd.num_args;
9793 	cmd->diag_token = ut_cmd.diag_token;
9794 
9795 	ptr = skb->data + sizeof(ut_cmd);
9796 
9797 	tlv = ptr;
9798 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9799 
9800 	ptr += TLV_HDR_SIZE;
9801 
9802 	ut_cmd_args = ptr;
9803 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
9804 		ut_cmd_args[i] = test_args[i];
9805 
9806 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9807 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
9808 		   le32_to_cpu(cmd->module_id), le32_to_cpu(cmd->vdev_id),
9809 		   le32_to_cpu(cmd->num_args), le32_to_cpu(cmd->diag_token));
9810 
9811 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
9812 
9813 	if (ret) {
9814 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST cmd: %d\n",
9815 			    ret);
9816 		dev_kfree_skb(skb);
9817 	}
9818 
9819 	return ret;
9820 }
9821 
9822 int ath12k_wmi_simulate_radar(struct ath12k *ar)
9823 {
9824 	struct ath12k_link_vif *arvif;
9825 	u32 dfs_args[DFS_MAX_TEST_ARGS];
9826 	struct wmi_unit_test_cmd wmi_ut;
9827 	bool arvif_found = false;
9828 
9829 	list_for_each_entry(arvif, &ar->arvifs, list) {
9830 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
9831 			arvif_found = true;
9832 			break;
9833 		}
9834 	}
9835 
9836 	if (!arvif_found)
9837 		return -EINVAL;
9838 
9839 	dfs_args[DFS_TEST_CMDID] = 0;
9840 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
9841 	/* Currently we can pass segment_id (b0 - b1), chirp (b2) and
9842 	 * freq offset (b3 - b10) to the unit test. For simulation
9843 	 * purposes these can be set to 0, which is valid.
9844 	 */
9845 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
9846 
9847 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
9848 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
9849 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
9850 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
9851 
9852 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
9853 
9854 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
9855 }
9856 
9857 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
9858 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
9859 {
9860 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
9861 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9862 	struct sk_buff *skb;
9863 	struct wmi_tlv *tlv;
9864 	__le32 *pdev_id;
9865 	u32 buf_len;
9866 	void *ptr;
9867 	int ret;
9868 
9869 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
9870 
9871 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9872 	if (!skb)
9873 		return -ENOMEM;
9874 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
9875 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
9876 						 sizeof(*cmd));
9877 
9878 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
9879 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
9880 	cmd->subid = cpu_to_le32(tpc_stats_type);
9881 
9882 	ptr = skb->data + sizeof(*cmd);
9883 
9884 	/* The below TLV arrays optionally follow this fixed param TLV structure
9885 	 * 1. ARRAY_UINT32 pdev_ids[]
9886 	 *      If this array is present and non-zero length, stats should only
9887 	 *      be provided from the pdevs identified in the array.
9888 	 * 2. ARRAY_UINT32 vdev_ids[]
9889 	 *      If this array is present and non-zero length, stats should only
9890 	 *      be provided from the vdevs identified in the array.
9891 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
9892 	 *      If this array is present and non-zero length, stats should only
9893 	 *      be provided from the peers with the MAC addresses specified
9894 	 *      in the array
9895 	 */
9896 	tlv = ptr;
9897 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9898 	ptr += TLV_HDR_SIZE;
9899 
9900 	pdev_id = ptr;
9901 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
9902 	ptr += sizeof(*pdev_id);
9903 
9904 	tlv = ptr;
9905 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9906 	ptr += TLV_HDR_SIZE;
9907 
9908 	tlv = ptr;
9909 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
9910 	ptr += TLV_HDR_SIZE;
9911 
9912 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
9913 	if (ret) {
9914 		ath12k_warn(ar->ab,
9915 			    "failed to submit WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
9916 		dev_kfree_skb(skb);
9917 		return ret;
9918 	}
9919 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
9920 		   ar->pdev->pdev_id);
9921 
9922 	return ret;
9923 }
9924 
9925 int ath12k_wmi_connect(struct ath12k_base *ab)
9926 {
9927 	u32 i;
9928 	u8 wmi_ep_count;
9929 
9930 	wmi_ep_count = ab->htc.wmi_ep_count;
9931 	if (wmi_ep_count > ab->hw_params->max_radios)
9932 		return -EINVAL;
9933 
9934 	for (i = 0; i < wmi_ep_count; i++)
9935 		ath12k_connect_pdev_htc_service(ab, i);
9936 
9937 	return 0;
9938 }
9939 
9940 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
9941 {
9942 	if (WARN_ON(pdev_id >= MAX_RADIOS))
9943 		return;
9944 
9945 	/* TODO: Deinit any pdev specific wmi resource */
9946 }
9947 
9948 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
9949 			   u8 pdev_id)
9950 {
9951 	struct ath12k_wmi_pdev *wmi_handle;
9952 
9953 	if (pdev_id >= ab->hw_params->max_radios)
9954 		return -EINVAL;
9955 
9956 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
9957 
9958 	wmi_handle->wmi_ab = &ab->wmi_ab;
9959 
9960 	ab->wmi_ab.ab = ab;
9961 	/* TODO: Init remaining resource specific to pdev */
9962 
9963 	return 0;
9964 }
9965 
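/* SoC-level WMI attach: pdev 0 is attached here and the completions used
 * to wait for the firmware's service-ready and unified-ready events are
 * initialized; any additional pdevs are expected to be attached
 * separately via ath12k_wmi_pdev_attach().
 */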
9966 int ath12k_wmi_attach(struct ath12k_base *ab)
9967 {
9968 	int ret;
9969 
9970 	ret = ath12k_wmi_pdev_attach(ab, 0);
9971 	if (ret)
9972 		return ret;
9973 
9974 	ab->wmi_ab.ab = ab;
9975 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9976 
9977 	/* It's overwritten when service_ext_ready is handled */
9978 	if (ab->hw_params->single_pdev_only)
9979 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9980 
9981 	/* TODO: Init remaining wmi soc resources required */
9982 	init_completion(&ab->wmi_ab.service_ready);
9983 	init_completion(&ab->wmi_ab.unified_ready);
9984 
9985 	return 0;
9986 }
9987 
9988 void ath12k_wmi_detach(struct ath12k_base *ab)
9989 {
9990 	int i;
9991 
9992 	/* TODO: Deinit wmi resource specific to SOC as required */
9993 
9994 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
9995 		ath12k_wmi_pdev_detach(ab, i);
9996 
9997 	ath12k_wmi_free_dbring_caps(ab);
9998 }
9999 
10000 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
10001 {
10002 	struct wmi_hw_data_filter_cmd *cmd;
10003 	struct sk_buff *skb;
10004 	int len;
10005 
10006 	len = sizeof(*cmd);
10007 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10008 
10009 	if (!skb)
10010 		return -ENOMEM;
10011 
10012 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
10013 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
10014 						 sizeof(*cmd));
10015 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10016 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
10017 
10018 	/* Set all filter modes when disabling so the firmware clears them all */
10019 	if (arg->enable)
10020 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
10021 	else
10022 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
10023 
10024 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10025 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
10026 		   arg->enable, arg->hw_filter_bitmap);
10027 
10028 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
10029 }
10030 
10031 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
10032 {
10033 	struct wmi_wow_host_wakeup_cmd *cmd;
10034 	struct sk_buff *skb;
10035 	size_t len;
10036 
10037 	len = sizeof(*cmd);
10038 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10039 	if (!skb)
10040 		return -ENOMEM;
10041 
10042 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
10043 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
10044 						 sizeof(*cmd));
10045 
10046 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
10047 
10048 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
10049 }
10050 
10051 int ath12k_wmi_wow_enable(struct ath12k *ar)
10052 {
10053 	struct wmi_wow_enable_cmd *cmd;
10054 	struct sk_buff *skb;
10055 	int len;
10056 
10057 	len = sizeof(*cmd);
10058 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10059 	if (!skb)
10060 		return -ENOMEM;
10061 
10062 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
10063 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
10064 						 sizeof(*cmd));
10065 
10066 	cmd->enable = cpu_to_le32(1);
10067 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
10068 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
10069 
10070 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
10071 }
10072 
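/* A minimal usage sketch (hypothetical caller; event and values chosen
 * for illustration only):
 *
 *	ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id,
 *					      WOW_MAGIC_PKT_RECVD_EVENT, 1);
 */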
10073 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
10074 				    enum wmi_wow_wakeup_event event,
10075 				    u32 enable)
10076 {
10077 	struct wmi_wow_add_del_event_cmd *cmd;
10078 	struct sk_buff *skb;
10079 	size_t len;
10080 
10081 	len = sizeof(*cmd);
10082 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10083 	if (!skb)
10084 		return -ENOMEM;
10085 
10086 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
10087 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
10088 						 sizeof(*cmd));
10089 	cmd->vdev_id = cpu_to_le32(vdev_id);
10090 	cmd->is_add = cpu_to_le32(enable);
10091 	cmd->event_bitmap = cpu_to_le32((1 << event));
10092 
10093 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
10094 		   wow_wakeup_event(event), enable, vdev_id);
10095 
10096 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
10097 }
10098 
10099 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
10100 			       const u8 *pattern, const u8 *mask,
10101 			       int pattern_len, int pattern_offset)
10102 {
10103 	struct wmi_wow_add_pattern_cmd *cmd;
10104 	struct wmi_wow_bitmap_pattern_params *bitmap;
10105 	struct wmi_tlv *tlv;
10106 	struct sk_buff *skb;
10107 	void *ptr;
10108 	size_t len;
10109 
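	/* The firmware appears to parse a fixed sequence of TLVs here, so
	 * the unused pattern types (ipv4/ipv6 sync, magic, info timeout)
	 * must still be present as zero-length placeholders after the
	 * bitmap pattern.
	 */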
10110 	len = sizeof(*cmd) +
10111 	      sizeof(*tlv) +			/* array struct */
10112 	      sizeof(*bitmap) +			/* bitmap */
10113 	      sizeof(*tlv) +			/* empty ipv4 sync */
10114 	      sizeof(*tlv) +			/* empty ipv6 sync */
10115 	      sizeof(*tlv) +			/* empty magic */
10116 	      sizeof(*tlv) +			/* empty info timeout */
10117 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
10118 
10119 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10120 	if (!skb)
10121 		return -ENOMEM;
10122 
10123 	/* cmd */
10124 	ptr = skb->data;
10125 	cmd = ptr;
10126 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
10127 						 sizeof(*cmd));
10128 	cmd->vdev_id = cpu_to_le32(vdev_id);
10129 	cmd->pattern_id = cpu_to_le32(pattern_id);
10130 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10131 
10132 	ptr += sizeof(*cmd);
10133 
10134 	/* bitmap */
10135 	tlv = ptr;
10136 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
10137 
10138 	ptr += sizeof(*tlv);
10139 
10140 	bitmap = ptr;
10141 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
10142 						    sizeof(*bitmap));
10143 	memcpy(bitmap->patternbuf, pattern, pattern_len);
10144 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
10145 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
10146 	bitmap->pattern_len = cpu_to_le32(pattern_len);
10147 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
10148 	bitmap->pattern_id = cpu_to_le32(pattern_id);
10149 
10150 	ptr += sizeof(*bitmap);
10151 
10152 	/* ipv4 sync */
10153 	tlv = ptr;
10154 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10155 
10156 	ptr += sizeof(*tlv);
10157 
10158 	/* ipv6 sync */
10159 	tlv = ptr;
10160 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10161 
10162 	ptr += sizeof(*tlv);
10163 
10164 	/* magic */
10165 	tlv = ptr;
10166 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10167 
10168 	ptr += sizeof(*tlv);
10169 
10170 	/* pattern info timeout */
10171 	tlv = ptr;
10172 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10173 
10174 	ptr += sizeof(*tlv);
10175 
10176 	/* ratelimit interval */
10177 	tlv = ptr;
10178 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
10179 
10180 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
10181 		   vdev_id, pattern_id, pattern_offset, pattern_len);
10182 
10183 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
10184 			bitmap->patternbuf, pattern_len);
10185 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
10186 			bitmap->bitmaskbuf, pattern_len);
10187 
10188 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
10189 }
10190 
10191 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
10192 {
10193 	struct wmi_wow_del_pattern_cmd *cmd;
10194 	struct sk_buff *skb;
10195 	size_t len;
10196 
10197 	len = sizeof(*cmd);
10198 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10199 	if (!skb)
10200 		return -ENOMEM;
10201 
10202 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
10203 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
10204 						 sizeof(*cmd));
10205 	cmd->vdev_id = cpu_to_le32(vdev_id);
10206 	cmd->pattern_id = cpu_to_le32(pattern_id);
10207 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10208 
10209 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
10210 		   vdev_id, pattern_id);
10211 
10212 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
10213 }
10214 
10215 static struct sk_buff *
10216 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
10217 				   struct wmi_pno_scan_req_arg *pno)
10218 {
10219 	struct nlo_configured_params *nlo_list;
10220 	size_t len, nlo_list_len, channel_list_len;
10221 	struct wmi_wow_nlo_config_cmd *cmd;
10222 	__le32 *channel_list;
10223 	struct wmi_tlv *tlv;
10224 	struct sk_buff *skb;
10225 	void *ptr;
10226 	u32 i;
10227 
10228 	len = sizeof(*cmd) +
10229 	      /* TLV placeholder for array of structures
10230 	       * nlo_configured_params(nlo_list)
10231 	       */
10232 	      sizeof(*tlv) +
10233 	      /* TLV placeholder for array of uint32 channel_list */
10234 	      sizeof(*tlv);
10235 
10236 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
10237 	len += channel_list_len;
10238 
10239 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
10240 	len += nlo_list_len;
10241 
10242 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10243 	if (!skb)
10244 		return ERR_PTR(-ENOMEM);
10245 
10246 	ptr = skb->data;
10247 	cmd = ptr;
10248 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
10249 
10250 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
10251 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
10252 
10253 	/* current FW does not support min-max range for dwell time */
10254 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
10255 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
10256 
10257 	if (pno->do_passive_scan)
10258 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
10259 
10260 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
10261 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
10262 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
10263 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
10264 
10265 	if (pno->enable_pno_scan_randomization) {
10266 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
10267 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
10268 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
10269 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
10270 	}
10271 
10272 	ptr += sizeof(*cmd);
10273 
10274 	/* nlo_configured_params(nlo_list) */
10275 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
10276 	tlv = ptr;
10277 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
10278 
10279 	ptr += sizeof(*tlv);
10280 	nlo_list = ptr;
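	/* Each nlo_configured_params entry carries its own TLV header;
	 * write it through a wmi_tlv alias of the embedded tlv_header
	 * field, then fill in the SSID, RSSI and broadcast-network
	 * conditions.
	 */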
10281 	for (i = 0; i < pno->uc_networks_count; i++) {
10282 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
10283 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
10284 						     sizeof(*nlo_list));
10285 
10286 		nlo_list[i].ssid.valid = cpu_to_le32(1);
10287 		nlo_list[i].ssid.ssid.ssid_len =
10288 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
10289 		memcpy(nlo_list[i].ssid.ssid.ssid,
10290 		       pno->a_networks[i].ssid.ssid,
10291 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
10292 
10293 		if (pno->a_networks[i].rssi_threshold &&
10294 		    pno->a_networks[i].rssi_threshold > -300) {
10295 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
10296 			nlo_list[i].rssi_cond.rssi =
10297 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
10298 		}
10299 
10300 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
10301 		nlo_list[i].bcast_nw_type.bcast_nw_type =
10302 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
10303 	}
10304 
10305 	ptr += nlo_list_len;
10306 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
10307 	tlv = ptr;
10308 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
10309 	ptr += sizeof(*tlv);
10310 	channel_list = ptr;
10311 
10312 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
10313 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
10314 
10315 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
10316 		   vdev_id);
10317 
10318 	return skb;
10319 }
10320 
10321 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
10322 							 u32 vdev_id)
10323 {
10324 	struct wmi_wow_nlo_config_cmd *cmd;
10325 	struct sk_buff *skb;
10326 	size_t len;
10327 
10328 	len = sizeof(*cmd);
10329 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10330 	if (!skb)
10331 		return ERR_PTR(-ENOMEM);
10332 
10333 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
10334 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
10335 
10336 	cmd->vdev_id = cpu_to_le32(vdev_id);
10337 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
10338 
10339 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10340 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
10341 	return skb;
10342 }
10343 
10344 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
10345 			      struct wmi_pno_scan_req_arg *pno_scan)
10346 {
10347 	struct sk_buff *skb;
10348 
10349 	if (pno_scan->enable)
10350 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
10351 	else
10352 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
10353 
10354 	if (IS_ERR_OR_NULL(skb))
10355 		return -ENOMEM;
10356 
10357 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
10358 }
10359 
10360 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
10361 				       struct wmi_arp_ns_offload_arg *offload,
10362 				       void **ptr,
10363 				       bool enable,
10364 				       bool ext)
10365 {
10366 	struct wmi_ns_offload_params *ns;
10367 	struct wmi_tlv *tlv;
10368 	void *buf_ptr = *ptr;
10369 	u32 ns_cnt, ns_ext_tuples;
10370 	int i, max_offloads;
10371 
10372 	ns_cnt = offload->ipv6_count;
10373 
10374 	tlv = buf_ptr;
10375 
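	/* The first NS tuple array is capped at WMI_MAX_NS_OFFLOADS
	 * entries; when the caller has more IPv6 addresses than that, the
	 * remaining tuples are emitted in a second "ext" array appended
	 * after the ARP tuples.
	 */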
10376 	if (ext) {
10377 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
10378 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10379 						 ns_ext_tuples * sizeof(*ns));
10380 		i = WMI_MAX_NS_OFFLOADS;
10381 		max_offloads = offload->ipv6_count;
10382 	} else {
10383 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10384 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
10385 		i = 0;
10386 		max_offloads = WMI_MAX_NS_OFFLOADS;
10387 	}
10388 
10389 	buf_ptr += sizeof(*tlv);
10390 
10391 	for (; i < max_offloads; i++) {
10392 		ns = buf_ptr;
10393 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
10394 							sizeof(*ns));
10395 
10396 		if (enable) {
10397 			if (i < ns_cnt)
10398 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
10399 
10400 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
10401 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
10402 
10403 			if (offload->ipv6_type[i])
10404 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
10405 
10406 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
10407 
10408 			if (!is_zero_ether_addr(ns->target_mac.addr))
10409 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
10410 
10411 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10412 				   "wmi index %d ns_solicited %pI6 target %pI6",
10413 				   i, ns->solicitation_ipaddr,
10414 				   ns->target_ipaddr[0]);
10415 		}
10416 
10417 		buf_ptr += sizeof(*ns);
10418 	}
10419 
10420 	*ptr = buf_ptr;
10421 }
10422 
10423 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
10424 					struct wmi_arp_ns_offload_arg *offload,
10425 					void **ptr,
10426 					bool enable)
10427 {
10428 	struct wmi_arp_offload_params *arp;
10429 	struct wmi_tlv *tlv;
10430 	void *buf_ptr = *ptr;
10431 	int i;
10432 
10433 	/* fill arp tuple */
10434 	tlv = buf_ptr;
10435 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10436 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
10437 	buf_ptr += sizeof(*tlv);
10438 
10439 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
10440 		arp = buf_ptr;
10441 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
10442 							 sizeof(*arp));
10443 
10444 		if (enable && i < offload->ipv4_count) {
10445 			/* Copy the target ip addr and flags */
10446 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
10447 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
10448 
10449 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
10450 				   arp->target_ipaddr);
10451 		}
10452 
10453 		buf_ptr += sizeof(*arp);
10454 	}
10455 
10456 	*ptr = buf_ptr;
10457 }
10458 
10459 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
10460 			      struct ath12k_link_vif *arvif,
10461 			      struct wmi_arp_ns_offload_arg *offload,
10462 			      bool enable)
10463 {
10464 	struct wmi_set_arp_ns_offload_cmd *cmd;
10465 	struct wmi_tlv *tlv;
10466 	struct sk_buff *skb;
10467 	void *buf_ptr;
10468 	size_t len;
10469 	u8 ns_cnt, ns_ext_tuples = 0;
10470 
10471 	ns_cnt = offload->ipv6_count;
10472 
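	/* Layout: fixed params, WMI_MAX_NS_OFFLOADS NS tuples, then
	 * WMI_MAX_ARP_OFFLOADS ARP tuples; NS tuples beyond the first
	 * WMI_MAX_NS_OFFLOADS go in an extra trailing array.
	 */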
10473 	len = sizeof(*cmd) +
10474 	      sizeof(*tlv) +
10475 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
10476 	      sizeof(*tlv) +
10477 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
10478 
10479 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
10480 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
10481 		len += sizeof(*tlv) +
10482 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
10483 	}
10484 
10485 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10486 	if (!skb)
10487 		return -ENOMEM;
10488 
10489 	buf_ptr = skb->data;
10490 	cmd = buf_ptr;
10491 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
10492 						 sizeof(*cmd));
10493 	cmd->flags = cpu_to_le32(0);
10494 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10495 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
10496 
10497 	buf_ptr += sizeof(*cmd);
10498 
10499 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
10500 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
10501 
10502 	if (ns_ext_tuples)
10503 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
10504 
10505 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
10506 }
10507 
10508 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
10509 				 struct ath12k_link_vif *arvif, bool enable)
10510 {
10511 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
10512 	struct wmi_gtk_rekey_offload_cmd *cmd;
10513 	struct sk_buff *skb;
10514 	__le64 replay_ctr;
10515 	int len;
10516 
10517 	len = sizeof(*cmd);
10518 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10519 	if (!skb)
10520 		return -ENOMEM;
10521 
10522 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10523 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10524 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10525 
10526 	if (enable) {
10527 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
10528 
10529 		/* the kck/kek buffer lengths in rekey_data and cmd are equal */
10530 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
10531 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
10532 
10533 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
10534 		memcpy(cmd->replay_ctr, &replay_ctr,
10535 		       sizeof(replay_ctr));
10536 	} else {
10537 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
10538 	}
10539 
10540 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
10541 		   arvif->vdev_id, enable);
10542 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10543 }
10544 
10545 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
10546 				 struct ath12k_link_vif *arvif)
10547 {
10548 	struct wmi_gtk_rekey_offload_cmd *cmd;
10549 	struct sk_buff *skb;
10550 	int len;
10551 
10552 	len = sizeof(*cmd);
10553 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10554 	if (!skb)
10555 		return -ENOMEM;
10556 
10557 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10558 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10559 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10560 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
10561 
10562 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
10563 		   arvif->vdev_id);
10564 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10565 }
10566 
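/* A minimal usage sketch (hypothetical caller; values chosen for
 * illustration only):
 *
 *	struct wmi_sta_keepalive_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.enabled = 1,
 *		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
 *		.interval = 30,
 *	};
 *
 *	ret = ath12k_wmi_sta_keepalive(ar, &arg);
 */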
10567 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
10568 			     const struct wmi_sta_keepalive_arg *arg)
10569 {
10570 	struct wmi_sta_keepalive_arp_resp_params *arp;
10571 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10572 	struct wmi_sta_keepalive_cmd *cmd;
10573 	struct sk_buff *skb;
10574 	size_t len;
10575 
10576 	len = sizeof(*cmd) + sizeof(*arp);
10577 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10578 	if (!skb)
10579 		return -ENOMEM;
10580 
10581 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
10582 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
10583 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10584 	cmd->enabled = cpu_to_le32(arg->enabled);
10585 	cmd->interval = cpu_to_le32(arg->interval);
10586 	cmd->method = cpu_to_le32(arg->method);
10587 
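	/* The ARP response TLV always follows the fixed params; its
	 * address fields are only meaningful for the ARP-based keepalive
	 * methods and are left zeroed otherwise.
	 */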
10588 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
10589 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
10590 						 sizeof(*arp));
10591 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
10592 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
10593 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
10594 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
10595 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
10596 	}
10597 
10598 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10599 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
10600 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
10601 
10602 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
10603 }
10604 
10605 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
10606 {
10607 	struct wmi_mlo_setup_cmd *cmd;
10608 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10609 	u32 *partner_links, num_links;
10610 	int i, ret, buf_len, arg_len;
10611 	struct sk_buff *skb;
10612 	struct wmi_tlv *tlv;
10613 	void *ptr;
10614 
10615 	num_links = mlo_params->num_partner_links;
10616 	arg_len = num_links * sizeof(u32);
10617 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
10618 
10619 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
10620 	if (!skb)
10621 		return -ENOMEM;
10622 
10623 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
10624 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
10625 						 sizeof(*cmd));
10626 	cmd->mld_group_id = cpu_to_le32(mlo_params->group_id);
10627 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10628 	ptr = skb->data + sizeof(*cmd);
10629 
10630 	tlv = ptr;
10631 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
10632 	ptr += TLV_HDR_SIZE;
10633 
10634 	partner_links = ptr;
10635 	for (i = 0; i < num_links; i++)
10636 		partner_links[i] = mlo_params->partner_link_id[i];
10637 
10638 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
10639 	if (ret) {
10640 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
10641 			    ret);
10642 		dev_kfree_skb(skb);
10643 		return ret;
10644 	}
10645 
10646 	return 0;
10647 }
10648 
10649 int ath12k_wmi_mlo_ready(struct ath12k *ar)
10650 {
10651 	struct wmi_mlo_ready_cmd *cmd;
10652 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10653 	struct sk_buff *skb;
10654 	int ret, len;
10655 
10656 	len = sizeof(*cmd);
10657 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10658 	if (!skb)
10659 		return -ENOMEM;
10660 
10661 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
10662 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
10663 						 sizeof(*cmd));
10664 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10665 
10666 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
10667 	if (ret) {
10668 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
10669 			    ret);
10670 		dev_kfree_skb(skb);
10671 		return ret;
10672 	}
10673 
10674 	return 0;
10675 }
10676 
10677 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
10678 {
10679 	struct wmi_mlo_teardown_cmd *cmd;
10680 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10681 	struct sk_buff *skb;
10682 	int ret, len;
10683 
10684 	len = sizeof(*cmd);
10685 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10686 	if (!skb)
10687 		return -ENOMEM;
10688 
10689 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
10690 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
10691 						 sizeof(*cmd));
10692 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10693 	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
10694 
10695 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
10696 	if (ret) {
10697 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
10698 			    ret);
10699 		dev_kfree_skb(skb);
10700 		return ret;
10701 	}
10702 
10703 	return 0;
10704 }
10705 
10706 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
10707 {
10708 	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
10709 			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
10710 }
10711 
10712 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
10713 				       u32 vdev_id,
10714 				       struct ath12k_reg_tpc_power_info *param)
10715 {
10716 	struct wmi_vdev_set_tpc_power_cmd *cmd;
10717 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10718 	struct wmi_vdev_ch_power_params *ch;
10719 	int i, ret, len, array_len;
10720 	struct sk_buff *skb;
10721 	struct wmi_tlv *tlv;
10722 	u8 *ptr;
10723 
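	/* One wmi_vdev_ch_power_params entry is sent per power level in
	 * the regulatory TPC info, packed as an ARRAY_STRUCT after the
	 * fixed params.
	 */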
10724 	array_len = sizeof(*ch) * param->num_pwr_levels;
10725 	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
10726 
10727 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10728 	if (!skb)
10729 		return -ENOMEM;
10730 
10731 	ptr = skb->data;
10732 
10733 	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
10734 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
10735 						 sizeof(*cmd));
10736 	cmd->vdev_id = cpu_to_le32(vdev_id);
10737 	cmd->psd_power = cpu_to_le32(param->is_psd_power);
10738 	cmd->eirp_power = cpu_to_le32(param->eirp_power);
10739 	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
10740 
10741 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10742 		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
10743 		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
10744 
10745 	ptr += sizeof(*cmd);
10746 	tlv = (struct wmi_tlv *)ptr;
10747 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
10748 
10749 	ptr += TLV_HDR_SIZE;
10750 	ch = (struct wmi_vdev_ch_power_params *)ptr;
10751 
10752 	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
10753 		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
10754 							sizeof(*ch));
10755 		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
10756 		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
10757 
10758 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
10759 			   le32_to_cpu(ch->chan_cfreq), le32_to_cpu(ch->tx_power));
10760 	}
10761 
10762 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
10763 	if (ret) {
10764 		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
10765 		dev_kfree_skb(skb);
10766 		return ret;
10767 	}
10768 
10769 	return 0;
10770 }
10771 
10772 static int
10773 ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
10774 				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
10775 				struct wmi_mlo_link_set_active_arg *arg)
10776 {
10777 	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
10778 	u8 i;
10779 
10780 	if (arg->num_disallow_mode_comb >
10781 	    ARRAY_SIZE(arg->disallow_bmap)) {
10782 		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
10783 			    arg->num_disallow_mode_comb);
10784 		return -EINVAL;
10785 	}
10786 
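	/* Each entry packs up to four IEEE link IDs into a single
	 * combination word (one bitfield per link ID) alongside the
	 * disallowed-mode bitmap.
	 */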
10787 	dislw_bmap_arg = &arg->disallow_bmap[0];
10788 	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
10789 		dislw_bmap->tlv_header =
10790 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
10791 		dislw_bmap->disallowed_mode_bitmap =
10792 				cpu_to_le32(dislw_bmap_arg->disallowed_mode);
10793 		dislw_bmap->ieee_link_id_comb =
10794 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
10795 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
10796 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
10797 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
10798 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
10799 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
10800 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
10801 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);
10802 
10803 		ath12k_dbg(ab, ATH12K_DBG_WMI,
10804 			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
10805 			   i, dislw_bmap_arg->disallowed_mode,
10806 			   dislw_bmap_arg->ieee_link_id_comb);
10807 		dislw_bmap++;
10808 		dislw_bmap_arg++;
10809 	}
10810 
10811 	return 0;
10812 }
10813 
10814 int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
10815 					    struct wmi_mlo_link_set_active_arg *arg)
10816 {
10817 	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
10818 	struct wmi_mlo_set_active_link_number_params *link_num_param;
10819 	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
10820 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
10821 	struct wmi_mlo_link_set_active_cmd *cmd;
10822 	u32 num_inactive_vdev_bitmap = 0;
10823 	u32 num_disallow_mode_comb = 0;
10824 	struct wmi_tlv *tlv;
10825 	struct sk_buff *skb;
10826 	__le32 *vdev_bitmap;
10827 	void *buf_ptr;
10828 	int i, ret;
10829 	u32 len;
10830 
10831 	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
10832 		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
10833 		return -EINVAL;
10834 	}
10835 
10836 	switch (arg->force_mode) {
10837 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
10838 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
10839 		num_link_num_param = arg->num_link_entry;
10840 		fallthrough;
10841 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
10842 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
10843 	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
10844 		num_vdev_bitmap = arg->num_vdev_bitmap;
10845 		break;
10846 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
10847 		num_vdev_bitmap = arg->num_vdev_bitmap;
10848 		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
10849 		break;
10850 	default:
10851 		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
10852 		return -EINVAL;
10853 	}
10854 
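	/* TLV order emitted here: link-number structs, active vdev bitmap,
	 * inactive vdev bitmap (or an empty placeholder), two empty
	 * ieee_link_id bitmaps, then the disallowed-mode combination
	 * structs.
	 */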
10855 	num_disallow_mode_comb = arg->num_disallow_mode_comb;
10856 	len = sizeof(*cmd) +
10857 	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
10858 	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
10859 	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
10860 	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
10861 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
10862 		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10863 
10864 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
10865 	if (!skb)
10866 		return -ENOMEM;
10867 
10868 	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
10869 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
10870 						 sizeof(*cmd));
10871 	cmd->force_mode = cpu_to_le32(arg->force_mode);
10872 	cmd->reason = cpu_to_le32(arg->reason);
10873 	ath12k_dbg(ab, ATH12K_DBG_WMI,
10874 		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
10875 		   arg->force_mode, arg->reason, num_link_num_param,
10876 		   num_vdev_bitmap, num_inactive_vdev_bitmap,
10877 		   num_disallow_mode_comb);
10878 
10879 	buf_ptr = skb->data + sizeof(*cmd);
10880 	tlv = buf_ptr;
10881 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10882 					 sizeof(*link_num_param) * num_link_num_param);
10883 	buf_ptr += TLV_HDR_SIZE;
10884 
10885 	if (num_link_num_param) {
10886 		cmd->ctrl_flags =
10887 			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
10888 					 CRTL_F_DYNC_FORCE_LINK_NUM);
10889 
10890 		link_num_param = buf_ptr;
10891 		for (i = 0; i < num_link_num_param; i++) {
10892 			link_num_param->tlv_header =
10893 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
10894 			link_num_param->num_of_link =
10895 				cpu_to_le32(arg->link_num[i].num_of_link);
10896 			link_num_param->vdev_type =
10897 				cpu_to_le32(arg->link_num[i].vdev_type);
10898 			link_num_param->vdev_subtype =
10899 				cpu_to_le32(arg->link_num[i].vdev_subtype);
10900 			link_num_param->home_freq =
10901 				cpu_to_le32(arg->link_num[i].home_freq);
10902 			ath12k_dbg(ab, ATH12K_DBG_WMI,
10903 				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
10904 				   i, arg->link_num[i].num_of_link,
10905 				   arg->link_num[i].vdev_type,
10906 				   arg->link_num[i].vdev_subtype,
10907 				   arg->link_num[i].home_freq,
10908 				   __le32_to_cpu(cmd->ctrl_flags));
10909 			link_num_param++;
10910 		}
10911 
10912 		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
10913 	}
10914 
10915 	tlv = buf_ptr;
10916 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10917 					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
10918 	buf_ptr += TLV_HDR_SIZE;
10919 
10920 	if (num_vdev_bitmap) {
10921 		vdev_bitmap = buf_ptr;
10922 		for (i = 0; i < num_vdev_bitmap; i++) {
10923 			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
10924 			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
10925 				   i, arg->vdev_bitmap[i]);
10926 		}
10927 
10928 		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
10929 	}
10930 
10931 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
10932 		tlv = buf_ptr;
10933 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10934 						 sizeof(*vdev_bitmap) *
10935 						 num_inactive_vdev_bitmap);
10936 		buf_ptr += TLV_HDR_SIZE;
10937 
10938 		if (num_inactive_vdev_bitmap) {
10939 			vdev_bitmap = buf_ptr;
10940 			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
10941 				vdev_bitmap[i] =
10942 					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
10943 				ath12k_dbg(ab, ATH12K_DBG_WMI,
10944 					   "entry %d inactive_vdev_id_bitmap 0x%x",
10945 					   i, arg->inactive_vdev_bitmap[i]);
10946 			}
10947 
10948 			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10949 		}
10950 	} else {
10951 		/* add empty vdev bitmap2 tlv */
10952 		tlv = buf_ptr;
10953 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10954 		buf_ptr += TLV_HDR_SIZE;
10955 	}
10956 
10957 	/* add empty ieee_link_id_bitmap tlv */
10958 	tlv = buf_ptr;
10959 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10960 	buf_ptr += TLV_HDR_SIZE;
10961 
10962 	/* add empty ieee_link_id_bitmap2 tlv */
10963 	tlv = buf_ptr;
10964 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10965 	buf_ptr += TLV_HDR_SIZE;
10966 
10967 	tlv = buf_ptr;
10968 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10969 					 sizeof(*disallowed_mode_bmap) *
10970 					 arg->num_disallow_mode_comb);
10971 	buf_ptr += TLV_HDR_SIZE;
10972 
10973 	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
10974 	if (ret)
10975 		goto free_skb;
10976 
10977 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
10978 	if (ret) {
10979 		ath12k_warn(ab,
10980 			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
10981 		goto free_skb;
10982 	}
10983 
10984 	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
10985 
10986 	return ret;
10987 
10988 free_skb:
10989 	dev_kfree_skb(skb);
10990 	return ret;
10991 }
10992