// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
	bool hal_reg_caps_ext2_done;
	bool scan_radio_caps_ext2_done;
	bool twt_caps_done;
	bool htt_msdu_idx_to_qtype_map_done;
	bool dbs_or_sbs_cap_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
};

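/* Encode a TLV header word: the tag goes in the WMI_TLV_TAG field and
 * the value length (not counting the header itself) in WMI_TLV_LEN.
 */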
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

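/* Convenience wrapper that takes the total command length, including
 * the TLV header, and encodes only the value portion in the header.
 */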
static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host support for peer map v3 and unmap v2 */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

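/* Priority table used when picking the preferred hardware mode out of
 * the modes advertised by firmware in the service ready ext event.
 */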
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

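/* Walk a buffer of WMI TLVs, validating each header against the
 * remaining buffer length and the per-tag minimum length from
 * ath12k_wmi_tlv_policies, then hand each value to the iter callback.
 */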
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

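/* Iterator callback that records a pointer to each TLV value into a
 * caller-supplied table indexed by tag, for random access afterwards.
 */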
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

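/* Prepend the WMI command header to the skb and hand it to HTC. On
 * failure the header is pulled back off so the caller may retry.
 */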
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

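/* Send a WMI command, sleeping until HTC tx credits are available.
 * Retries on -EAGAIN until WMI_SEND_TIMEOUT_HZ expires and gives up
 * with -ESHUTDOWN once a crash flush is in progress.
 */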
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

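/* Locate the MAC/PHY capability entry for the given hw mode and phy id
 * in the service ready ext TLVs and populate the pdev capability
 * structures, including the per-band HT/VHT/HE caps, from it.
 */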
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
		pdev_cap->nss_ratio_enabled =
			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by firmware depends on the actual hw
	 * chains used. For example, for 4x4 capable macphys, the first 4
	 * chains can be used for the first mac and the remaining 4 chains
	 * for the second mac, or vice-versa. In this case, tx/rx chainmask
	 * 0xf will be advertised for the first mac and 0xf0 for the second
	 * mac, or vice-versa. Compute the shift value for the tx/rx
	 * chainmask, which is used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed due to NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

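/* For single-pdev devices, off-channel management frames sent during a
 * remain-on-channel operation must carry the RoC frequency; otherwise
 * zero is returned and the channel is left to firmware.
 */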
static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

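/* Allocate a zeroed skb for a WMI command, rounding the length up to a
 * 4-byte multiple and reserving headroom for the HTC and WMI headers.
 */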
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

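/* Queue a management frame for transmission. At most
 * WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline into the command;
 * the full frame is referenced by its DMA address for firmware to
 * fetch.
 */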
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*cmd));

	cmd->stats_id = cpu_to_le32(stats_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(skb);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return ret;
}

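/* Create a vdev in firmware. The command always carries per-band tx/rx
 * stream configuration and, when a valid MLD address is supplied, an
 * additional MLO parameters TLV.
 */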
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;
	/* This can be optimized by sending the tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

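/* Fill the WMI channel TLV from the vdev start arguments. For 160 MHz
 * (HE/EHT) and 320 MHz (EHT) channels, band_center_freq1 is moved to
 * the center of the half of the channel containing the primary, and
 * the overall center frequency goes into band_center_freq2.
 */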
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(center_freq1);
	if (arg->mode == MODE_11BE_EHT320) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);

	} else if (arg->mode == MODE_11BE_EHT160 ||
		   arg->mode == MODE_11AX_HE160) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);
	} else {
		chan->band_center_freq2 = 0;
	}

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

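/* Start or restart a vdev on the channel described by @arg. On a fresh
 * start of an MLO vdev, the command additionally carries the MLO flags
 * and the list of partner link parameters.
 */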
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);
			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(params->vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(params->aid);

	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

	if (params->tx_bssid) {
		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   params->vdev_id, params->aid, params->bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;
	struct wmi_peer_create_mlo_params *ml_param;
	void *ptr;
	struct wmi_tlv *tlv;

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
1619 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1620 		dev_kfree_skb(skb);
1621 	}
1622 
1623 	return ret;
1624 }
1625 
1626 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1627 			    u32 pdev_id)
1628 {
1629 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1630 	struct wmi_pdev_suspend_cmd *cmd;
1631 	struct sk_buff *skb;
1632 	int ret;
1633 
1634 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1635 	if (!skb)
1636 		return -ENOMEM;
1637 
1638 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1639 
1640 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1641 						 sizeof(*cmd));
1642 
1643 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1644 	cmd->pdev_id = cpu_to_le32(pdev_id);
1645 
1646 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1647 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1648 
1649 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1650 	if (ret) {
1651 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1652 		dev_kfree_skb(skb);
1653 	}
1654 
1655 	return ret;
1656 }
1657 
1658 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1659 {
1660 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1661 	struct wmi_pdev_resume_cmd *cmd;
1662 	struct sk_buff *skb;
1663 	int ret;
1664 
1665 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1666 	if (!skb)
1667 		return -ENOMEM;
1668 
1669 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1670 
1671 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1672 						 sizeof(*cmd));
1673 	cmd->pdev_id = cpu_to_le32(pdev_id);
1674 
1675 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1676 		   "WMI pdev resume pdev id %d\n", pdev_id);
1677 
1678 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1679 	if (ret) {
1680 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1681 		dev_kfree_skb(skb);
1682 	}
1683 
1684 	return ret;
1685 }
1686 
1687 /* TODO: FW support for this command is not available yet.
1688  * It can be tested once the command and the corresponding
1689  * event are implemented in FW.
1690  */
1691 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1692 					  enum wmi_bss_chan_info_req_type type)
1693 {
1694 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1695 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1696 	struct sk_buff *skb;
1697 	int ret;
1698 
1699 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1700 	if (!skb)
1701 		return -ENOMEM;
1702 
1703 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1704 
1705 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1706 						 sizeof(*cmd));
1707 	cmd->req_type = cpu_to_le32(type);
1708 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1709 
1710 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1711 		   "WMI bss chan info req type %d\n", type);
1712 
1713 	ret = ath12k_wmi_cmd_send(wmi, skb,
1714 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1715 	if (ret) {
1716 		ath12k_warn(ar->ab,
1717 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1718 		dev_kfree_skb(skb);
1719 	}
1720 
1721 	return ret;
1722 }
1723 
1724 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1725 					struct ath12k_wmi_ap_ps_arg *arg)
1726 {
1727 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1728 	struct wmi_ap_ps_peer_cmd *cmd;
1729 	struct sk_buff *skb;
1730 	int ret;
1731 
1732 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1733 	if (!skb)
1734 		return -ENOMEM;
1735 
1736 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1737 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1738 						 sizeof(*cmd));
1739 
1740 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1741 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1742 	cmd->param = cpu_to_le32(arg->param);
1743 	cmd->value = cpu_to_le32(arg->value);
1744 
1745 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1746 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1747 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1748 
1749 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1750 	if (ret) {
1751 		ath12k_warn(ar->ab,
1752 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1753 		dev_kfree_skb(skb);
1754 	}
1755 
1756 	return ret;
1757 }
1758 
1759 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1760 				u32 param, u32 param_value)
1761 {
1762 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1763 	struct wmi_sta_powersave_param_cmd *cmd;
1764 	struct sk_buff *skb;
1765 	int ret;
1766 
1767 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1768 	if (!skb)
1769 		return -ENOMEM;
1770 
1771 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1772 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1773 						 sizeof(*cmd));
1774 
1775 	cmd->vdev_id = cpu_to_le32(vdev_id);
1776 	cmd->param = cpu_to_le32(param);
1777 	cmd->value = cpu_to_le32(param_value);
1778 
1779 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1780 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1781 		   vdev_id, param, param_value);
1782 
1783 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1784 	if (ret) {
1785 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1786 		dev_kfree_skb(skb);
1787 	}
1788 
1789 	return ret;
1790 }
1791 
1792 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1793 {
1794 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1795 	struct wmi_force_fw_hang_cmd *cmd;
1796 	struct sk_buff *skb;
1797 	int ret, len;
1798 
1799 	len = sizeof(*cmd);
1800 
1801 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1802 	if (!skb)
1803 		return -ENOMEM;
1804 
1805 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1806 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1807 						 len);
1808 
1809 	cmd->type = cpu_to_le32(type);
1810 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1811 
1812 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1813 	if (ret) {
1814 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1815 		dev_kfree_skb(skb);
1816 	}
1817 
1818 	return ret;
1819 }
1820 
1821 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1822 				  u32 param_id, u32 param_value)
1823 {
1824 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1825 	struct wmi_vdev_set_param_cmd *cmd;
1826 	struct sk_buff *skb;
1827 	int ret;
1828 
1829 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1830 	if (!skb)
1831 		return -ENOMEM;
1832 
1833 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1834 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1835 						 sizeof(*cmd));
1836 
1837 	cmd->vdev_id = cpu_to_le32(vdev_id);
1838 	cmd->param_id = cpu_to_le32(param_id);
1839 	cmd->param_value = cpu_to_le32(param_value);
1840 
1841 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1842 		   "WMI vdev id 0x%x set param %d value %d\n",
1843 		   vdev_id, param_id, param_value);
1844 
1845 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1846 	if (ret) {
1847 		ath12k_warn(ar->ab,
1848 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1849 		dev_kfree_skb(skb);
1850 	}
1851 
1852 	return ret;
1853 }
1854 
1855 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1856 {
1857 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1858 	struct wmi_get_pdev_temperature_cmd *cmd;
1859 	struct sk_buff *skb;
1860 	int ret;
1861 
1862 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1863 	if (!skb)
1864 		return -ENOMEM;
1865 
1866 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1867 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1868 						 sizeof(*cmd));
1869 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1870 
1871 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1872 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1873 
1874 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1875 	if (ret) {
1876 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1877 		dev_kfree_skb(skb);
1878 	}
1879 
1880 	return ret;
1881 }
1882 
1883 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1884 					    u32 vdev_id, u32 bcn_ctrl_op)
1885 {
1886 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1887 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1888 	struct sk_buff *skb;
1889 	int ret;
1890 
1891 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1892 	if (!skb)
1893 		return -ENOMEM;
1894 
1895 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1896 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1897 						 sizeof(*cmd));
1898 
1899 	cmd->vdev_id = cpu_to_le32(vdev_id);
1900 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1901 
1902 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1903 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1904 		   vdev_id, bcn_ctrl_op);
1905 
1906 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1907 	if (ret) {
1908 		ath12k_warn(ar->ab,
1909 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1910 		dev_kfree_skb(skb);
1911 	}
1912 
1913 	return ret;
1914 }
1915 
1916 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1917 			     const u8 *p2p_ie)
1918 {
1919 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1920 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1921 	size_t p2p_ie_len, aligned_len;
1922 	struct wmi_tlv *tlv;
1923 	struct sk_buff *skb;
1924 	void *ptr;
1925 	int ret, len;
1926 
1927 	p2p_ie_len = p2p_ie[1] + 2;
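	/* p2p_ie points at a raw IEEE 802.11 element: p2p_ie[0] is the
	 * element ID and p2p_ie[1] the payload length, so the full element
	 * occupies the payload plus the two header octets.
	 */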
1928 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1929 
1930 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1931 
1932 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1933 	if (!skb)
1934 		return -ENOMEM;
1935 
1936 	ptr = skb->data;
1937 	cmd = ptr;
1938 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1939 						 sizeof(*cmd));
1940 	cmd->vdev_id = cpu_to_le32(vdev_id);
1941 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1942 
1943 	ptr += sizeof(*cmd);
1944 	tlv = ptr;
1945 	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
1946 					     aligned_len);
1947 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1948 
1949 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1950 	if (ret) {
1951 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1952 		dev_kfree_skb(skb);
1953 	}
1954 
1955 	return ret;
1956 }
1957 
1958 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1959 			struct ieee80211_mutable_offsets *offs,
1960 			struct sk_buff *bcn,
1961 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1962 {
1963 	struct ath12k *ar = arvif->ar;
1964 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1965 	struct ath12k_base *ab = ar->ab;
1966 	struct wmi_bcn_tmpl_cmd *cmd;
1967 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1968 	struct ath12k_vif *ahvif = arvif->ahvif;
1969 	struct ieee80211_bss_conf *conf;
1970 	u32 vdev_id = arvif->vdev_id;
1971 	struct wmi_tlv *tlv;
1972 	struct sk_buff *skb;
1973 	u32 ema_params = 0;
1974 	void *ptr;
1975 	int ret, len;
1976 	size_t aligned_len = roundup(bcn->len, 4);
1977 
1978 	conf = ath12k_mac_get_link_bss_conf(arvif);
1979 	if (!conf) {
1980 		ath12k_warn(ab,
1981 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1982 			    ahvif->vif->addr, arvif->link_id);
1983 		return -EINVAL;
1984 	}
1985 
1986 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1987 
1988 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1989 	if (!skb)
1990 		return -ENOMEM;
1991 
1992 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1993 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1994 						 sizeof(*cmd));
1995 	cmd->vdev_id = cpu_to_le32(vdev_id);
1996 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1997 
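	/* For an active channel switch, point the firmware at the CSA and
	 * extended CSA countdown bytes inside the template so it can
	 * decrement them in transmitted beacons, and cache the current
	 * counter so the host stays in sync with what is on the air.
	 */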
1998 	if (conf->csa_active) {
1999 		cmd->csa_switch_count_offset =
2000 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
2001 		cmd->ext_csa_switch_count_offset =
2002 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
2003 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
2004 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
2005 	}
2006 
2007 	cmd->buf_len = cpu_to_le32(bcn->len);
2008 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
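	/* For EMA (enhanced multiple BSSID advertisement) the templates of a
	 * rotation are sent one call at a time: ema_params packs the total
	 * template count, this template's index, and first/last markers so
	 * the firmware can tell when a complete set has been delivered.
	 */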
2009 	if (ema_args) {
2010 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
2011 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
2012 		if (ema_args->bcn_index == 0)
2013 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
2014 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
2015 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
2016 		cmd->ema_params = cpu_to_le32(ema_params);
2017 	}
2018 	cmd->feature_enable_bitmap =
2019 		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
2020 					    WMI_BEACON_PROTECTION_EN_BIT));
2021 
2022 	ptr = skb->data + sizeof(*cmd);
2023 
2024 	bcn_prb_info = ptr;
2025 	len = sizeof(*bcn_prb_info);
2026 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
2027 							  len);
2028 	bcn_prb_info->caps = 0;
2029 	bcn_prb_info->erp = 0;
2030 
2031 	ptr += sizeof(*bcn_prb_info);
2032 
2033 	tlv = ptr;
2034 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2035 	memcpy(tlv->value, bcn->data, bcn->len);
2036 
2037 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2038 	if (ret) {
2039 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2040 		dev_kfree_skb(skb);
2041 	}
2042 
2043 	return ret;
2044 }
2045 
2046 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2047 				struct wmi_vdev_install_key_arg *arg)
2048 {
2049 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2050 	struct wmi_vdev_install_key_cmd *cmd;
2051 	struct wmi_tlv *tlv;
2052 	struct sk_buff *skb;
2053 	int ret, len, key_len_aligned;
2054 
2055 	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
2056 	 * length is specified in cmd->key_len.
2057 	 */
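	/* For example (hypothetical values): a 13-byte WEP-104 key is padded
	 * to key_len_aligned = 16, the byte-array TLV advertises 16 bytes,
	 * and the firmware recovers the real length from cmd->key_len = 13.
	 */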
2058 	key_len_aligned = roundup(arg->key_len, 4);
2059 
2060 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2061 
2062 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2063 	if (!skb)
2064 		return -ENOMEM;
2065 
2066 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2067 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2068 						 sizeof(*cmd));
2069 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2070 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2071 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2072 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2073 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2074 	cmd->key_len = cpu_to_le32(arg->key_len);
2075 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2076 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2077 
2078 	if (arg->key_rsc_counter)
2079 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2080 
2081 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2082 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2083 	memcpy(tlv->value, arg->key_data, arg->key_len);
2084 
2085 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2086 		   "WMI vdev install key idx %d cipher %d len %d\n",
2087 		   arg->key_idx, arg->key_cipher, arg->key_len);
2088 
2089 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2090 	if (ret) {
2091 		ath12k_warn(ar->ab,
2092 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2093 		dev_kfree_skb(skb);
2094 	}
2095 
2096 	return ret;
2097 }
2098 
2099 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2100 				       struct ath12k_wmi_peer_assoc_arg *arg,
2101 				       bool hw_crypto_disabled)
2102 {
2103 	cmd->peer_flags = 0;
2104 	cmd->peer_flags_ext = 0;
2105 
2106 	if (arg->is_wme_set) {
2107 		if (arg->qos_flag)
2108 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2109 		if (arg->apsd_flag)
2110 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2111 		if (arg->ht_flag)
2112 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2113 		if (arg->bw_40)
2114 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2115 		if (arg->bw_80)
2116 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2117 		if (arg->bw_160)
2118 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2119 		if (arg->bw_320)
2120 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2121 
2122 		/* Typically, if STBC is enabled for VHT it should be enabled
2123 		 * for HT as well.
2124 		 */
2125 		if (arg->stbc_flag)
2126 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2127 
2128 		/* Typically, if LDPC is enabled for VHT it should be enabled
2129 		 * for HT as well.
2130 		 */
2131 		if (arg->ldpc_flag)
2132 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2133 
2134 		if (arg->static_mimops_flag)
2135 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2136 		if (arg->dynamic_mimops_flag)
2137 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2138 		if (arg->spatial_mux_flag)
2139 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2140 		if (arg->vht_flag)
2141 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2142 		if (arg->he_flag)
2143 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2144 		if (arg->twt_requester)
2145 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2146 		if (arg->twt_responder)
2147 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2148 		if (arg->eht_flag)
2149 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2150 	}
2151 
2152 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2153 	 * (during re-association).
2154 	 * Authorization will be done for these modes on key installation.
2155 	 */
2156 	if (arg->auth_flag)
2157 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2158 	if (arg->need_ptk_4_way) {
2159 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2160 		if (!hw_crypto_disabled && arg->is_assoc)
2161 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2162 	}
2163 	if (arg->need_gtk_2_way)
2164 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2165 	/* safe mode bypass the 4-way handshake */
2166 	if (arg->safe_mode_enabled)
2167 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2168 						 WMI_PEER_NEED_GTK_2_WAY));
2169 
2170 	if (arg->is_pmf_enabled)
2171 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2172 
2173 	/* Disable AMSDU for station transmit, if user configures it.
2174 	 * Disable AMSDU for AP transmit to 11n stations, if user
2175 	 * configures it.
2176 	 * if (arg->amsdu_disable) - add once FW support is available.
2177 	 */
2178 
2179 	/* Target asserts if node is marked HT and all MCS rates are set
2180 	 * to 0. Mark the node as non-HT if all the MCS rates are disabled
2181 	 * through iwpriv.
2182 	 */
2183 	if (arg->peer_ht_rates.num_rates == 0)
2184 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2185 }
2186 
2187 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2188 				   struct ath12k_wmi_peer_assoc_arg *arg)
2189 {
2190 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2191 	struct wmi_peer_assoc_complete_cmd *cmd;
2192 	struct ath12k_wmi_vht_rate_set_params *mcs;
2193 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2194 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2195 	struct wmi_peer_assoc_mlo_params *ml_params;
2196 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2197 	struct sk_buff *skb;
2198 	struct wmi_tlv *tlv;
2199 	void *ptr;
2200 	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
2201 	u32 peer_ht_rates_align, eml_trans_timeout;
2202 	int i, ret, len;
2203 	u16 eml_cap;
2204 	__le32 v;
2205 
2206 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2207 					  sizeof(u32));
2208 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2209 				      sizeof(u32));
2210 
2211 	len = sizeof(*cmd) +
2212 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2213 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2214 	      sizeof(*mcs) + TLV_HDR_SIZE +
2215 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2216 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2217 
2218 	if (arg->ml.enabled)
2219 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2220 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2221 	else
2222 		len += (2 * TLV_HDR_SIZE);
2223 
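	/* Note that even with MLO disabled the two trailing array TLVs (ML
	 * params and partner links) are still emitted below with zero
	 * length, since the command layout expects their headers to be
	 * present; hence the 2 * TLV_HDR_SIZE in the else branch above.
	 */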
2224 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2225 	if (!skb)
2226 		return -ENOMEM;
2227 
2228 	ptr = skb->data;
2229 
2230 	cmd = ptr;
2231 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2232 						 sizeof(*cmd));
2233 
2234 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2235 
2236 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2237 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2238 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2239 
2240 	ath12k_wmi_copy_peer_flags(cmd, arg,
2241 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2242 					    &ar->ab->dev_flags));
2243 
2244 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2245 
2246 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2247 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2248 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2249 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2250 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2251 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2252 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2253 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2254 
2255 	/* Update 11ax capabilities */
2256 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2257 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2258 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2259 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2260 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2261 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2262 		cmd->peer_he_cap_phy[i] =
2263 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2264 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2265 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2266 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2267 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2268 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2269 
2270 	/* Update 11be capabilities */
2271 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2272 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2273 		       0);
2274 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2275 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2276 		       0);
2277 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2278 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2279 
2280 	/* Update peer legacy rate information */
2281 	ptr += sizeof(*cmd);
2282 
2283 	tlv = ptr;
2284 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2285 
2286 	ptr += TLV_HDR_SIZE;
2287 
2288 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2289 	memcpy(ptr, arg->peer_legacy_rates.rates,
2290 	       arg->peer_legacy_rates.num_rates);
2291 
2292 	/* Update peer HT rate information */
2293 	ptr += peer_legacy_rates_align;
2294 
2295 	tlv = ptr;
2296 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2297 	ptr += TLV_HDR_SIZE;
2298 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2299 	memcpy(ptr, arg->peer_ht_rates.rates,
2300 	       arg->peer_ht_rates.num_rates);
2301 
2302 	/* VHT Rates */
2303 	ptr += peer_ht_rates_align;
2304 
2305 	mcs = ptr;
2306 
2307 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2308 						 sizeof(*mcs));
2309 
2310 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2311 
2312 	/* Update bandwidth-NSS mapping */
2313 	cmd->peer_bw_rxnss_override = 0;
2314 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2315 
2316 	if (arg->vht_capable) {
2317 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2318 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2319 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2320 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2321 	}
2322 
2323 	/* HE Rates */
2324 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2325 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2326 
2327 	ptr += sizeof(*mcs);
2328 
2329 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2330 
2331 	tlv = ptr;
2332 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2333 	ptr += TLV_HDR_SIZE;
2334 
2335 	/* Loop through the HE rate set */
2336 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2337 		he_mcs = ptr;
2338 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2339 							    sizeof(*he_mcs));
2340 
2341 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2342 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2343 		ptr += sizeof(*he_mcs);
2344 	}
2345 
2346 	tlv = ptr;
2347 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2348 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2349 	ptr += TLV_HDR_SIZE;
2350 	if (!len)
2351 		goto skip_ml_params;
2352 
2353 	ml_params = ptr;
2354 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2355 						       len);
2356 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2357 
2358 	if (arg->ml.assoc_link)
2359 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2360 
2361 	if (arg->ml.primary_umac)
2362 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2363 
2364 	if (arg->ml.logical_link_idx_valid)
2365 		ml_params->flags |=
2366 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2367 
2368 	if (arg->ml.peer_id_valid)
2369 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2370 
2371 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2372 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2373 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2374 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2375 
2376 	eml_cap = arg->ml.eml_cap;
2377 	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
2378 		/* Padding delay */
2379 		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
2380 		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
2381 		/* Transition delay */
2382 		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
2383 		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
2384 		/* Transition timeout */
2385 		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
2386 		ml_params->emlsr_trans_timeout_us =
2387 					cpu_to_le32(eml_trans_timeout);
2388 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u\n",
2389 			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
2390 			   eml_trans_timeout);
2391 	}
2392 
2393 	ptr += sizeof(*ml_params);
2394 
2395 skip_ml_params:
2396 	/* Loop through the EHT rate set */
2397 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2398 	tlv = ptr;
2399 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2400 	ptr += TLV_HDR_SIZE;
2401 
2402 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2403 		eht_mcs = ptr;
2404 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2405 							     sizeof(*eht_mcs));
2406 
2407 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2408 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2409 		ptr += sizeof(*eht_mcs);
2410 	}
2411 
2412 	/* Update MCS15 capability */
2413 	if (arg->eht_disable_mcs15)
2414 		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
2415 
2416 	tlv = ptr;
2417 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2418 	/* fill ML Partner links */
2419 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2420 	ptr += TLV_HDR_SIZE;
2421 
2422 	if (len == 0)
2423 		goto send;
2424 
2425 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2426 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2427 
2428 		partner_info = ptr;
2429 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2430 								  sizeof(*partner_info));
2431 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2432 		partner_info->hw_link_id =
2433 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2434 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2435 
2436 		if (arg->ml.partner_info[i].assoc_link)
2437 			partner_info->flags |=
2438 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2439 
2440 		if (arg->ml.partner_info[i].primary_umac)
2441 			partner_info->flags |=
2442 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2443 
2444 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2445 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2446 			partner_info->flags |= v;
2447 		}
2448 
2449 		partner_info->logical_link_idx =
2450 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2451 		ptr += sizeof(*partner_info);
2452 	}
2453 
2454 send:
2455 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2456 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
2457 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2458 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2459 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2460 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2461 		   cmd->peer_mpdu_density,
2462 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2463 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2464 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2465 		   cmd->peer_he_cap_phy[2],
2466 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2467 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2468 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2469 		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
2470 
2471 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2472 	if (ret) {
2473 		ath12k_warn(ar->ab,
2474 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2475 		dev_kfree_skb(skb);
2476 	}
2477 
2478 	return ret;
2479 }
2480 
2481 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2482 				struct ath12k_wmi_scan_req_arg *arg)
2483 {
2484 	/* setup commonly used values */
2485 	arg->scan_req_id = 1;
2486 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2487 	arg->dwell_time_active = 50;
2488 	arg->dwell_time_active_2g = 0;
2489 	arg->dwell_time_passive = 150;
2490 	arg->dwell_time_active_6g = 70;
2491 	arg->dwell_time_passive_6g = 70;
2492 	arg->min_rest_time = 50;
2493 	arg->max_rest_time = 500;
2494 	arg->repeat_probe_time = 0;
2495 	arg->probe_spacing_time = 0;
2496 	arg->idle_time = 0;
2497 	arg->max_scan_time = 20000;
2498 	arg->probe_delay = 5;
2499 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2500 				  WMI_SCAN_EVENT_COMPLETED |
2501 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2502 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2503 				  WMI_SCAN_EVENT_DEQUEUED;
2504 	arg->scan_f_chan_stat_evnt = 1;
2505 	arg->num_bssid = 1;
2506 
2507 	/* Fill bssid_list[0] with the broadcast address, otherwise the
2508 	 * BSSID and RA will be all zeros in probe requests.
2509 	 */
2510 	eth_broadcast_addr(arg->bssid_list[0].addr);
2511 }
2512 
2513 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
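/* A minimal usage sketch (hypothetical caller): populate the defaults first
 * and then override only what the request needs, e.g.
 *
 *	struct ath12k_wmi_scan_req_arg arg = {};
 *
 *	ath12k_wmi_start_scan_init(ar, &arg);
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.scan_id = ATH12K_SCAN_ID;
 *	ret = ath12k_wmi_send_scan_start_cmd(ar, &arg);
 */
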
2514 						   struct ath12k_wmi_scan_req_arg *arg)
2515 {
2516 	/* Scan events subscription */
2517 	if (arg->scan_ev_started)
2518 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2519 	if (arg->scan_ev_completed)
2520 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2521 	if (arg->scan_ev_bss_chan)
2522 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2523 	if (arg->scan_ev_foreign_chan)
2524 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2525 	if (arg->scan_ev_dequeued)
2526 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2527 	if (arg->scan_ev_preempted)
2528 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2529 	if (arg->scan_ev_start_failed)
2530 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2531 	if (arg->scan_ev_restarted)
2532 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2533 	if (arg->scan_ev_foreign_chn_exit)
2534 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2535 	if (arg->scan_ev_suspended)
2536 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2537 	if (arg->scan_ev_resumed)
2538 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2539 
2540 	/* Set scan control flags */
2541 	cmd->scan_ctrl_flags = 0;
2542 	if (arg->scan_f_passive)
2543 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2544 	if (arg->scan_f_strict_passive_pch)
2545 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2546 	if (arg->scan_f_promisc_mode)
2547 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2548 	if (arg->scan_f_capture_phy_err)
2549 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2550 	if (arg->scan_f_half_rate)
2551 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2552 	if (arg->scan_f_quarter_rate)
2553 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2554 	if (arg->scan_f_cck_rates)
2555 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2556 	if (arg->scan_f_ofdm_rates)
2557 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2558 	if (arg->scan_f_chan_stat_evnt)
2559 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2560 	if (arg->scan_f_filter_prb_req)
2561 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2562 	if (arg->scan_f_bcast_probe)
2563 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2564 	if (arg->scan_f_offchan_mgmt_tx)
2565 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2566 	if (arg->scan_f_offchan_data_tx)
2567 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2568 	if (arg->scan_f_force_active_dfs_chn)
2569 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2570 	if (arg->scan_f_add_tpc_ie_in_probe)
2571 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2572 	if (arg->scan_f_add_ds_ie_in_probe)
2573 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2574 	if (arg->scan_f_add_spoofed_mac_in_probe)
2575 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2576 	if (arg->scan_f_add_rand_seq_in_probe)
2577 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2578 	if (arg->scan_f_en_ie_whitelist_in_probe)
2579 		cmd->scan_ctrl_flags |=
2580 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2581 
2582 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2583 						 WMI_SCAN_DWELL_MODE_MASK);
2584 }
2585 
2586 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2587 				   struct ath12k_wmi_scan_req_arg *arg)
2588 {
2589 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2590 	struct wmi_start_scan_cmd *cmd;
2591 	struct ath12k_wmi_ssid_params *ssid = NULL;
2592 	struct ath12k_wmi_mac_addr_params *bssid;
2593 	struct sk_buff *skb;
2594 	struct wmi_tlv *tlv;
2595 	void *ptr;
2596 	int i, ret, len;
2597 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2598 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2599 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2600 
2601 	len = sizeof(*cmd);
2602 
2603 	len += TLV_HDR_SIZE;
2604 	if (arg->num_chan)
2605 		len += arg->num_chan * sizeof(u32);
2606 
2607 	len += TLV_HDR_SIZE;
2608 	if (arg->num_ssids)
2609 		len += arg->num_ssids * sizeof(*ssid);
2610 
2611 	len += TLV_HDR_SIZE;
2612 	if (arg->num_bssid)
2613 		len += sizeof(*bssid) * arg->num_bssid;
2614 
2615 	if (arg->num_hint_bssid)
2616 		len += TLV_HDR_SIZE +
2617 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2618 
2619 	if (arg->num_hint_s_ssid)
2620 		len += TLV_HDR_SIZE +
2621 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2622 
2623 	len += TLV_HDR_SIZE;
2624 	if (arg->extraie.len)
2625 		extraie_len_with_pad =
2626 			roundup(arg->extraie.len, sizeof(u32));
2627 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2628 		len += extraie_len_with_pad;
2629 	} else {
2630 		ath12k_warn(ar->ab, "discarding oversized extraie of %d bytes for scan start\n",
2631 			    arg->extraie.len);
2632 		extraie_len_with_pad = 0;
2633 	}
2634 
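	/* The variable part of the buffer is filled in a fixed order below:
	 * channel list (u32 array), SSIDs, BSSIDs and extra IEs always carry
	 * a TLV header even when empty, while the short-SSID and BSSID hint
	 * arrays are appended only when present.
	 */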
2635 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2636 	if (!skb)
2637 		return -ENOMEM;
2638 
2639 	ptr = skb->data;
2640 
2641 	cmd = ptr;
2642 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2643 						 sizeof(*cmd));
2644 
2645 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2646 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2647 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2648 	if (ar->state_11d == ATH12K_11D_PREPARING)
2649 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2650 	else
2651 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2652 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2653 
2654 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2655 
2656 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2657 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2658 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2659 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2660 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2661 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2662 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2663 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2664 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2665 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2666 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2667 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2668 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2669 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2670 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2671 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2672 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2673 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2674 
2675 	ptr += sizeof(*cmd);
2676 
2677 	len = arg->num_chan * sizeof(u32);
2678 
2679 	tlv = ptr;
2680 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2681 	ptr += TLV_HDR_SIZE;
2682 	tmp_ptr = (u32 *)ptr;
2683 
2684 	memcpy(tmp_ptr, arg->chan_list, len);
2685 
2686 	ptr += len;
2687 
2688 	len = arg->num_ssids * sizeof(*ssid);
2689 	tlv = ptr;
2690 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2691 
2692 	ptr += TLV_HDR_SIZE;
2693 
2694 	if (arg->num_ssids) {
2695 		ssid = ptr;
2696 		for (i = 0; i < arg->num_ssids; ++i) {
2697 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2698 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2699 			       arg->ssid[i].ssid_len);
2700 			ssid++;
2701 		}
2702 	}
2703 
2704 	ptr += (arg->num_ssids * sizeof(*ssid));
2705 	len = arg->num_bssid * sizeof(*bssid);
2706 	tlv = ptr;
2707 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2708 
2709 	ptr += TLV_HDR_SIZE;
2710 	bssid = ptr;
2711 
2712 	if (arg->num_bssid) {
2713 		for (i = 0; i < arg->num_bssid; ++i) {
2714 			ether_addr_copy(bssid->addr,
2715 					arg->bssid_list[i].addr);
2716 			bssid++;
2717 		}
2718 	}
2719 
2720 	ptr += arg->num_bssid * sizeof(*bssid);
2721 
2722 	len = extraie_len_with_pad;
2723 	tlv = ptr;
2724 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2725 	ptr += TLV_HDR_SIZE;
2726 
2727 	if (extraie_len_with_pad)
2728 		memcpy(ptr, arg->extraie.ptr,
2729 		       arg->extraie.len);
2730 
2731 	ptr += extraie_len_with_pad;
2732 
2733 	if (arg->num_hint_s_ssid) {
2734 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2735 		tlv = ptr;
2736 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2737 		ptr += TLV_HDR_SIZE;
2738 		s_ssid = ptr;
2739 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2740 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2741 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2742 			s_ssid++;
2743 		}
2744 		ptr += len;
2745 	}
2746 
2747 	if (arg->num_hint_bssid) {
2748 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2749 		tlv = ptr;
2750 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2751 		ptr += TLV_HDR_SIZE;
2752 		hint_bssid = ptr;
2753 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2754 			hint_bssid->freq_flags =
2755 				arg->hint_bssid[i].freq_flags;
2756 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2757 					&arg->hint_bssid[i].bssid.addr[0]);
2758 			hint_bssid++;
2759 		}
2760 	}
2761 
2762 	ret = ath12k_wmi_cmd_send(wmi, skb,
2763 				  WMI_START_SCAN_CMDID);
2764 	if (ret) {
2765 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2766 		dev_kfree_skb(skb);
2767 	}
2768 
2769 	return ret;
2770 }
2771 
2772 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2773 				  struct ath12k_wmi_scan_cancel_arg *arg)
2774 {
2775 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2776 	struct wmi_stop_scan_cmd *cmd;
2777 	struct sk_buff *skb;
2778 	int ret;
2779 
2780 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2781 	if (!skb)
2782 		return -ENOMEM;
2783 
2784 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2785 
2786 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2787 						 sizeof(*cmd));
2788 
2789 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2790 	cmd->requestor = cpu_to_le32(arg->requester);
2791 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2792 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2793 	/* stop the scan with the corresponding scan_id */
2794 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2795 		/* Cancelling all scans */
2796 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2797 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2798 		/* Cancelling VAP scans */
2799 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2800 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2801 		/* Cancelling specific scan */
2802 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2803 	} else {
2804 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2805 			    arg->req_type);
2806 		dev_kfree_skb(skb);
2807 		return -EINVAL;
2808 	}
2809 
2810 	ret = ath12k_wmi_cmd_send(wmi, skb,
2811 				  WMI_STOP_SCAN_CMDID);
2812 	if (ret) {
2813 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2814 		dev_kfree_skb(skb);
2815 	}
2816 
2817 	return ret;
2818 }
2819 
2820 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2821 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2822 {
2823 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2824 	struct wmi_scan_chan_list_cmd *cmd;
2825 	struct sk_buff *skb;
2826 	struct ath12k_wmi_channel_params *chan_info;
2827 	struct ath12k_wmi_channel_arg *channel_arg;
2828 	struct wmi_tlv *tlv;
2829 	void *ptr;
2830 	int i, ret, len;
2831 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2832 	__le32 *reg1, *reg2;
2833 
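	/* The channel list may exceed the maximum WMI message size, so it is
	 * sent in chunks: each pass fits as many channel entries as
	 * max_msg_len allows, and every chunk after the first sets the
	 * append flag so the firmware extends the existing list instead of
	 * replacing it.
	 */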
2834 	channel_arg = &arg->channel[0];
2835 	while (arg->nallchans) {
2836 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2837 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2838 			sizeof(*chan_info);
2839 
2840 		num_send_chans = min(arg->nallchans, max_chan_limit);
2841 
2842 		arg->nallchans -= num_send_chans;
2843 		len += sizeof(*chan_info) * num_send_chans;
2844 
2845 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2846 		if (!skb)
2847 			return -ENOMEM;
2848 
2849 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2850 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2851 							 sizeof(*cmd));
2852 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2853 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2854 		if (num_sends)
2855 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2856 
2857 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2858 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2859 			   num_send_chans, len, arg->pdev_id, num_sends);
2860 
2861 		ptr = skb->data + sizeof(*cmd);
2862 
2863 		len = sizeof(*chan_info) * num_send_chans;
2864 		tlv = ptr;
2865 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2866 						     len);
2867 		ptr += TLV_HDR_SIZE;
2868 
2869 		for (i = 0; i < num_send_chans; ++i) {
2870 			chan_info = ptr;
2871 			memset(chan_info, 0, sizeof(*chan_info));
2872 			len = sizeof(*chan_info);
2873 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2874 								       len);
2875 
2876 			reg1 = &chan_info->reg_info_1;
2877 			reg2 = &chan_info->reg_info_2;
2878 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2879 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2880 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2881 
2882 			if (channel_arg->is_chan_passive)
2883 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2884 			if (channel_arg->allow_he)
2885 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2886 			else if (channel_arg->allow_vht)
2887 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2888 			else if (channel_arg->allow_ht)
2889 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2890 			if (channel_arg->half_rate)
2891 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2892 			if (channel_arg->quarter_rate)
2893 				chan_info->info |=
2894 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2895 
2896 			if (channel_arg->psc_channel)
2897 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2898 
2899 			if (channel_arg->dfs_set)
2900 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2901 
2902 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2903 							    WMI_CHAN_INFO_MODE);
2904 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2905 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2906 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2907 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2908 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2909 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2910 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2911 						  WMI_CHAN_REG_INFO1_REG_CLS);
2912 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2913 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2914 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2915 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2916 
2917 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2918 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2919 				   i, channel_arg->mhz, le32_to_cpu(chan_info->info));
2920 
2921 			ptr += sizeof(*chan_info);
2922 
2923 			channel_arg++;
2924 		}
2925 
2926 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2927 		if (ret) {
2928 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2929 			dev_kfree_skb(skb);
2930 			return ret;
2931 		}
2932 
2933 		num_sends++;
2934 	}
2935 
2936 	return 0;
2937 }
2938 
2939 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2940 				   struct wmi_wmm_params_all_arg *param)
2941 {
2942 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2943 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2944 	struct wmi_wmm_params *wmm_param;
2945 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2946 	struct sk_buff *skb;
2947 	int ret, ac;
2948 
2949 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2950 	if (!skb)
2951 		return -ENOMEM;
2952 
2953 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2954 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2955 						 sizeof(*cmd));
2956 
2957 	cmd->vdev_id = cpu_to_le32(vdev_id);
2958 	cmd->wmm_param_type = 0;
2959 
2960 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2961 		switch (ac) {
2962 		case WME_AC_BE:
2963 			wmi_wmm_arg = &param->ac_be;
2964 			break;
2965 		case WME_AC_BK:
2966 			wmi_wmm_arg = &param->ac_bk;
2967 			break;
2968 		case WME_AC_VI:
2969 			wmi_wmm_arg = &param->ac_vi;
2970 			break;
2971 		case WME_AC_VO:
2972 			wmi_wmm_arg = &param->ac_vo;
2973 			break;
2974 		}
2975 
2976 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2977 		wmm_param->tlv_header =
2978 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2979 					       sizeof(*wmm_param));
2980 
2981 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2982 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2983 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2984 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2985 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2986 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2987 
2988 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2989 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2990 			   ac, wmi_wmm_arg->aifs, wmi_wmm_arg->cwmin,
2991 			   wmi_wmm_arg->cwmax, wmi_wmm_arg->txop,
2992 			   wmi_wmm_arg->acm, wmi_wmm_arg->no_ack);
2993 	}
2994 	ret = ath12k_wmi_cmd_send(wmi, skb,
2995 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2996 	if (ret) {
2997 		ath12k_warn(ar->ab,
2998 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2999 		dev_kfree_skb(skb);
3000 	}
3001 
3002 	return ret;
3003 }
3004 
3005 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
3006 						  u32 pdev_id)
3007 {
3008 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3009 	struct wmi_dfs_phyerr_offload_cmd *cmd;
3010 	struct sk_buff *skb;
3011 	int ret;
3012 
3013 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3014 	if (!skb)
3015 		return -ENOMEM;
3016 
3017 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
3018 	cmd->tlv_header =
3019 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
3020 				       sizeof(*cmd));
3021 
3022 	cmd->pdev_id = cpu_to_le32(pdev_id);
3023 
3024 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3025 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
3026 
3027 	ret = ath12k_wmi_cmd_send(wmi, skb,
3028 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
3029 	if (ret) {
3030 		ath12k_warn(ar->ab,
3031 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
3032 		dev_kfree_skb(skb);
3033 	}
3034 
3035 	return ret;
3036 }
3037 
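/* The BIOS interface commands are SoC-wide rather than per-radio: they are
 * issued on the first WMI pdev handle with the pdev id set to
 * WMI_PDEV_ID_SOC.
 */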
3038 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
3039 			    const u8 *buf, size_t buf_len)
3040 {
3041 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3042 	struct wmi_pdev_set_bios_interface_cmd *cmd;
3043 	struct wmi_tlv *tlv;
3044 	struct sk_buff *skb;
3045 	u8 *ptr;
3046 	u32 len, len_aligned;
3047 	int ret;
3048 
3049 	len_aligned = roundup(buf_len, sizeof(u32));
3050 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
3051 
3052 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3053 	if (!skb)
3054 		return -ENOMEM;
3055 
3056 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3057 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3058 						 sizeof(*cmd));
3059 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3060 	cmd->param_type_id = cpu_to_le32(param_id);
3061 	cmd->length = cpu_to_le32(buf_len);
3062 
3063 	ptr = skb->data + sizeof(*cmd);
3064 	tlv = (struct wmi_tlv *)ptr;
3065 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3066 	ptr += TLV_HDR_SIZE;
3067 	memcpy(ptr, buf, buf_len);
3068 
3069 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3070 				  skb,
3071 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3072 	if (ret) {
3073 		ath12k_warn(ab,
3074 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3075 			    param_id, ret);
3076 		dev_kfree_skb(skb);
3077 	}
3078 
3079 	return ret;
3080 }
3081 
3082 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3083 {
3084 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3085 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3086 	struct wmi_tlv *tlv;
3087 	struct sk_buff *skb;
3088 	int ret;
3089 	u8 *buf_ptr;
3090 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3091 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3092 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3093 
3094 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3095 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3096 					      sizeof(u32));
3097 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3098 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3099 
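	/* Buffer layout: the fixed command struct is followed by two byte
	 * array TLVs, the SAR power table and the DBS backoff table, each
	 * padded to a u32 boundary as WMI_TAG_ARRAY_BYTE requires.
	 */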
3100 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3101 	if (!skb)
3102 		return -ENOMEM;
3103 
3104 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3105 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3106 						 sizeof(*cmd));
3107 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3108 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3109 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3110 
3111 	buf_ptr = skb->data + sizeof(*cmd);
3112 	tlv = (struct wmi_tlv *)buf_ptr;
3113 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3114 					 sar_table_len_aligned);
3115 	buf_ptr += TLV_HDR_SIZE;
3116 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3117 
3118 	buf_ptr += sar_table_len_aligned;
3119 	tlv = (struct wmi_tlv *)buf_ptr;
3120 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3121 					 sar_dbs_backoff_len_aligned);
3122 	buf_ptr += TLV_HDR_SIZE;
3123 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3124 
3125 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3126 				  skb,
3127 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3128 	if (ret) {
3129 		ath12k_warn(ab,
3130 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3131 			    ret);
3132 		dev_kfree_skb(skb);
3133 	}
3134 
3135 	return ret;
3136 }
3137 
3138 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3139 {
3140 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3141 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3142 	struct wmi_tlv *tlv;
3143 	struct sk_buff *skb;
3144 	int ret;
3145 	u8 *buf_ptr;
3146 	u32 len, sar_geo_len_aligned;
3147 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3148 
3149 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3150 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3151 
3152 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3153 	if (!skb)
3154 		return -ENOMEM;
3155 
3156 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3157 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3158 						 sizeof(*cmd));
3159 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3160 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3161 
3162 	buf_ptr = skb->data + sizeof(*cmd);
3163 	tlv = (struct wmi_tlv *)buf_ptr;
3164 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3165 	buf_ptr += TLV_HDR_SIZE;
3166 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3167 
3168 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3169 				  skb,
3170 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3171 	if (ret) {
3172 		ath12k_warn(ab,
3173 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3174 			    ret);
3175 		dev_kfree_skb(skb);
3176 	}
3177 
3178 	return ret;
3179 }
3180 
3181 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3182 			  u32 tid, u32 initiator, u32 reason)
3183 {
3184 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3185 	struct wmi_delba_send_cmd *cmd;
3186 	struct sk_buff *skb;
3187 	int ret;
3188 
3189 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3190 	if (!skb)
3191 		return -ENOMEM;
3192 
3193 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3194 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3195 						 sizeof(*cmd));
3196 	cmd->vdev_id = cpu_to_le32(vdev_id);
3197 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3198 	cmd->tid = cpu_to_le32(tid);
3199 	cmd->initiator = cpu_to_le32(initiator);
3200 	cmd->reasoncode = cpu_to_le32(reason);
3201 
3202 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3203 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3204 		   vdev_id, mac, tid, initiator, reason);
3205 
3206 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3207 
3208 	if (ret) {
3209 		ath12k_warn(ar->ab,
3210 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3211 		dev_kfree_skb(skb);
3212 	}
3213 
3214 	return ret;
3215 }
3216 
3217 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3218 			      u32 tid, u32 status)
3219 {
3220 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3221 	struct wmi_addba_setresponse_cmd *cmd;
3222 	struct sk_buff *skb;
3223 	int ret;
3224 
3225 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3226 	if (!skb)
3227 		return -ENOMEM;
3228 
3229 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3230 	cmd->tlv_header =
3231 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3232 				       sizeof(*cmd));
3233 	cmd->vdev_id = cpu_to_le32(vdev_id);
3234 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3235 	cmd->tid = cpu_to_le32(tid);
3236 	cmd->statuscode = cpu_to_le32(status);
3237 
3238 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3239 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3240 		   vdev_id, mac, tid, status);
3241 
3242 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3243 
3244 	if (ret) {
3245 		ath12k_warn(ar->ab,
3246 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3247 		dev_kfree_skb(skb);
3248 	}
3249 
3250 	return ret;
3251 }
3252 
3253 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3254 			  u32 tid, u32 buf_size)
3255 {
3256 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3257 	struct wmi_addba_send_cmd *cmd;
3258 	struct sk_buff *skb;
3259 	int ret;
3260 
3261 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3262 	if (!skb)
3263 		return -ENOMEM;
3264 
3265 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3266 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3267 						 sizeof(*cmd));
3268 	cmd->vdev_id = cpu_to_le32(vdev_id);
3269 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3270 	cmd->tid = cpu_to_le32(tid);
3271 	cmd->buffersize = cpu_to_le32(buf_size);
3272 
3273 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3274 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3275 		   vdev_id, mac, tid, buf_size);
3276 
3277 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3278 
3279 	if (ret) {
3280 		ath12k_warn(ar->ab,
3281 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3282 		dev_kfree_skb(skb);
3283 	}
3284 
3285 	return ret;
3286 }
3287 
3288 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3289 {
3290 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3291 	struct wmi_addba_clear_resp_cmd *cmd;
3292 	struct sk_buff *skb;
3293 	int ret;
3294 
3295 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3296 	if (!skb)
3297 		return -ENOMEM;
3298 
3299 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3300 	cmd->tlv_header =
3301 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3302 				       sizeof(*cmd));
3303 	cmd->vdev_id = cpu_to_le32(vdev_id);
3304 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3305 
3306 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3307 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3308 		   vdev_id, mac);
3309 
3310 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3311 
3312 	if (ret) {
3313 		ath12k_warn(ar->ab,
3314 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3315 		dev_kfree_skb(skb);
3316 	}
3317 
3318 	return ret;
3319 }
3320 
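/* Program the initial regulatory hint into firmware. The hint is an
 * alpha2 string, a numeric country code or a regdomain id, selected
 * by arg->flags.
 */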
3321 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3322 				     struct ath12k_wmi_init_country_arg *arg)
3323 {
3324 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3325 	struct wmi_init_country_cmd *cmd;
3326 	struct sk_buff *skb;
3327 	int ret;
3328 
3329 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3330 	if (!skb)
3331 		return -ENOMEM;
3332 
3333 	cmd = (struct wmi_init_country_cmd *)skb->data;
3334 	cmd->tlv_header =
3335 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3336 				       sizeof(*cmd));
3337 
3338 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3339 
3340 	switch (arg->flags) {
3341 	case ALPHA_IS_SET:
3342 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3343 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3344 		break;
3345 	case CC_IS_SET:
3346 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3347 		cmd->cc_info.country_code =
3348 			cpu_to_le32(arg->cc_info.country_code);
3349 		break;
3350 	case REGDMN_IS_SET:
3351 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3352 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3353 		break;
3354 	default:
3355 		ret = -EINVAL;
3356 		goto out;
3357 	}
3358 
3359 	ret = ath12k_wmi_cmd_send(wmi, skb,
3360 				  WMI_SET_INIT_COUNTRY_CMDID);
3361 
3362 out:
3363 	if (ret) {
3364 		ath12k_warn(ar->ab,
3365 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3366 			    ret);
3367 		dev_kfree_skb(skb);
3368 	}
3369 
3370 	return ret;
3371 }
3372 
3373 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3374 					    struct wmi_set_current_country_arg *arg)
3375 {
3376 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3377 	struct wmi_set_current_country_cmd *cmd;
3378 	struct sk_buff *skb;
3379 	int ret;
3380 
3381 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3382 	if (!skb)
3383 		return -ENOMEM;
3384 
3385 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3386 	cmd->tlv_header =
3387 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3388 				       sizeof(*cmd));
3389 
3390 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3391 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3392 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3393 
3394 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3395 		   "set current country pdev id %d alpha2 %c%c\n",
3396 		   ar->pdev->pdev_id,
3397 		   arg->alpha2[0],
3398 		   arg->alpha2[1]);
3399 
3400 	if (ret) {
3401 		ath12k_warn(ar->ab,
3402 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3403 		dev_kfree_skb(skb);
3404 	}
3405 
3406 	return ret;
3407 }
3408 
3409 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3410 				       struct wmi_11d_scan_start_arg *arg)
3411 {
3412 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3413 	struct wmi_11d_scan_start_cmd *cmd;
3414 	struct sk_buff *skb;
3415 	int ret;
3416 
3417 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3418 	if (!skb)
3419 		return -ENOMEM;
3420 
3421 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3422 	cmd->tlv_header =
3423 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3424 				       sizeof(*cmd));
3425 
3426 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3427 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3428 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3429 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3430 
3431 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3432 		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
3433 		   arg->vdev_id, arg->scan_period_msec,
3434 		   arg->start_interval_msec);
3435 
3436 	if (ret) {
3437 		ath12k_warn(ar->ab,
3438 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3439 		dev_kfree_skb(skb);
3440 	}
3441 
3442 	return ret;
3443 }
3444 
3445 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
3446 {
3447 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3448 	struct wmi_11d_scan_stop_cmd *cmd;
3449 	struct sk_buff *skb;
3450 	int ret;
3451 
3452 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3453 	if (!skb)
3454 		return -ENOMEM;
3455 
3456 	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
3457 	cmd->tlv_header =
3458 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
3459 				       sizeof(*cmd));
3460 
3461 	cmd->vdev_id = cpu_to_le32(vdev_id);
3462 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
3463 
3464 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3465 		   "send 11d scan stop vdev id %d\n",
3466 		   vdev_id);
3467 
3468 	if (ret) {
3469 		ath12k_warn(ar->ab,
3470 			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3471 		dev_kfree_skb(skb);
3472 	}
3473 
3474 	return ret;
3475 }
3476 
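/* Enable TWT (target wake time) on a pdev using the driver's default
 * tuning for slot sizes, congestion thresholds and STA setup/teardown
 * limits.
 */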
3477 int
3478 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3479 {
3480 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3481 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3482 	struct wmi_twt_enable_params_cmd *cmd;
3483 	struct sk_buff *skb;
3484 	int ret, len;
3485 
3486 	len = sizeof(*cmd);
3487 
3488 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3489 	if (!skb)
3490 		return -ENOMEM;
3491 
3492 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3493 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3494 						 len);
3495 	cmd->pdev_id = cpu_to_le32(pdev_id);
3496 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3497 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3498 	cmd->congestion_thresh_setup =
3499 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3500 	cmd->congestion_thresh_teardown =
3501 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3502 	cmd->congestion_thresh_critical =
3503 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3504 	cmd->interference_thresh_teardown =
3505 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3506 	cmd->interference_thresh_setup =
3507 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3508 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3509 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3510 	cmd->no_of_bcast_mcast_slots =
3511 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3512 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3513 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3514 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3515 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3516 	cmd->remove_sta_slot_interval =
3517 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3518 	/* TODO add MBSSID support */
3519 	cmd->mbss_support = 0;
3520 
3521 	ret = ath12k_wmi_cmd_send(wmi, skb,
3522 				  WMI_TWT_ENABLE_CMDID);
3523 	if (ret) {
3524 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
3525 		dev_kfree_skb(skb);
3526 	}
3527 	return ret;
3528 }
3529 
3530 int
3531 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3532 {
3533 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3534 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3535 	struct wmi_twt_disable_params_cmd *cmd;
3536 	struct sk_buff *skb;
3537 	int ret, len;
3538 
3539 	len = sizeof(*cmd);
3540 
3541 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3542 	if (!skb)
3543 		return -ENOMEM;
3544 
3545 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3546 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3547 						 len);
3548 	cmd->pdev_id = cpu_to_le32(pdev_id);
3549 
3550 	ret = ath12k_wmi_cmd_send(wmi, skb,
3551 				  WMI_TWT_DISABLE_CMDID);
3552 	if (ret) {
3553 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
3554 		dev_kfree_skb(skb);
3555 	}
3556 	return ret;
3557 }
3558 
3559 int
3560 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3561 			     struct ieee80211_he_obss_pd *he_obss_pd)
3562 {
3563 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3564 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3565 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3566 	struct sk_buff *skb;
3567 	int ret, len;
3568 
3569 	len = sizeof(*cmd);
3570 
3571 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3572 	if (!skb)
3573 		return -ENOMEM;
3574 
3575 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3576 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3577 						 len);
3578 	cmd->vdev_id = cpu_to_le32(vdev_id);
3579 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3580 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3581 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3582 
3583 	ret = ath12k_wmi_cmd_send(wmi, skb,
3584 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3585 	if (ret) {
3586 		ath12k_warn(ab,
3587 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
3588 		dev_kfree_skb(skb);
3589 	}
3590 	return ret;
3591 }
3592 
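/* Configure BSS color collision detection for a vdev; when @enable is
 * false the event type asks firmware to disable detection instead.
 */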
3593 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3594 				  u8 bss_color, u32 period,
3595 				  bool enable)
3596 {
3597 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3598 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3599 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3600 	struct sk_buff *skb;
3601 	int ret, len;
3602 
3603 	len = sizeof(*cmd);
3604 
3605 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3606 	if (!skb)
3607 		return -ENOMEM;
3608 
3609 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3610 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3611 						 len);
3612 	cmd->vdev_id = cpu_to_le32(vdev_id);
3613 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3614 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3615 	cmd->current_bss_color = cpu_to_le32(bss_color);
3616 	cmd->detection_period_ms = cpu_to_le32(period);
3617 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3618 	cmd->free_slot_expiry_time_ms = 0;
3619 	cmd->flags = 0;
3620 
3621 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3622 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3623 		   le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->evt_type),
3624 		   le32_to_cpu(cmd->current_bss_color),
		   le32_to_cpu(cmd->detection_period_ms),
		   le32_to_cpu(cmd->scan_period_ms));
3625 
3626 	ret = ath12k_wmi_cmd_send(wmi, skb,
3627 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3628 	if (ret) {
3629 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
3630 		dev_kfree_skb(skb);
3631 	}
3632 	return ret;
3633 }
3634 
3635 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3636 						bool enable)
3637 {
3638 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3639 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3640 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3641 	struct sk_buff *skb;
3642 	int ret, len;
3643 
3644 	len = sizeof(*cmd);
3645 
3646 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3647 	if (!skb)
3648 		return -ENOMEM;
3649 
3650 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3651 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3652 						 len);
3653 	cmd->vdev_id = cpu_to_le32(vdev_id);
3654 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3655 
3656 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3657 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3658 		   le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->enable));
3659 
3660 	ret = ath12k_wmi_cmd_send(wmi, skb,
3661 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3662 	if (ret) {
3663 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
3664 		dev_kfree_skb(skb);
3665 	}
3666 	return ret;
3667 }
3668 
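/* Hand the FILS discovery frame template to firmware. The template
 * bytes follow the fixed command as a byte-array TLV padded to a
 * 4-byte boundary.
 */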
3669 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3670 				   struct sk_buff *tmpl)
3671 {
3672 	struct wmi_tlv *tlv;
3673 	struct sk_buff *skb;
3674 	void *ptr;
3675 	int ret, len;
3676 	size_t aligned_len;
3677 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3678 
3679 	aligned_len = roundup(tmpl->len, 4);
3680 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3681 
3682 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3683 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3684 
3685 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3686 	if (!skb)
3687 		return -ENOMEM;
3688 
3689 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3690 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3691 						 sizeof(*cmd));
3692 	cmd->vdev_id = cpu_to_le32(vdev_id);
3693 	cmd->buf_len = cpu_to_le32(tmpl->len);
3694 	ptr = skb->data + sizeof(*cmd);
3695 
3696 	tlv = ptr;
3697 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3698 	memcpy(tlv->value, tmpl->data, tmpl->len);
3699 
3700 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3701 	if (ret) {
3702 		ath12k_warn(ar->ab,
3703 			    "WMI vdev %i failed to send FILS discovery template command\n",
3704 			    vdev_id);
3705 		dev_kfree_skb(skb);
3706 	}
3707 	return ret;
3708 }
3709 
3710 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3711 			       struct sk_buff *tmpl)
3712 {
3713 	struct wmi_probe_tmpl_cmd *cmd;
3714 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3715 	struct wmi_tlv *tlv;
3716 	struct sk_buff *skb;
3717 	void *ptr;
3718 	int ret, len;
3719 	size_t aligned_len = roundup(tmpl->len, 4);
3720 
3721 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3722 		   "WMI vdev %i set probe response template\n", vdev_id);
3723 
3724 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3725 
3726 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3727 	if (!skb)
3728 		return -ENOMEM;
3729 
3730 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3731 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3732 						 sizeof(*cmd));
3733 	cmd->vdev_id = cpu_to_le32(vdev_id);
3734 	cmd->buf_len = cpu_to_le32(tmpl->len);
3735 
3736 	ptr = skb->data + sizeof(*cmd);
3737 
3738 	probe_info = ptr;
3739 	len = sizeof(*probe_info);
3740 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3741 							len);
3742 	probe_info->caps = 0;
3743 	probe_info->erp = 0;
3744 
3745 	ptr += sizeof(*probe_info);
3746 
3747 	tlv = ptr;
3748 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3749 	memcpy(tlv->value, tmpl->data, tmpl->len);
3750 
3751 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3752 	if (ret) {
3753 		ath12k_warn(ar->ab,
3754 			    "WMI vdev %i failed to send probe response template command\n",
3755 			    vdev_id);
3756 		dev_kfree_skb(skb);
3757 	}
3758 	return ret;
3759 }
3760 
3761 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3762 			      bool unsol_bcast_probe_resp_enabled)
3763 {
3764 	struct sk_buff *skb;
3765 	int ret, len;
3766 	struct wmi_fils_discovery_cmd *cmd;
3767 
3768 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3769 		   "WMI vdev %i set %s interval to %u TU\n",
3770 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3771 		   "unsolicited broadcast probe response" : "FILS discovery",
3772 		   interval);
3773 
3774 	len = sizeof(*cmd);
3775 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3776 	if (!skb)
3777 		return -ENOMEM;
3778 
3779 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3780 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3781 						 len);
3782 	cmd->vdev_id = cpu_to_le32(vdev_id);
3783 	cmd->interval = cpu_to_le32(interval);
3784 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3785 
3786 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3787 	if (ret) {
3788 		ath12k_warn(ar->ab,
3789 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3790 			    vdev_id);
3791 		dev_kfree_skb(skb);
3792 	}
3793 	return ret;
3794 }
3795 
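/* Build the per-radio band-to-mac mapping from the HAL register
 * capabilities, choosing the frequency range that matches the bands
 * each pdev supports.
 */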
3796 static void
3797 ath12k_fill_band_to_mac_param(struct ath12k_base  *soc,
3798 			      struct ath12k_wmi_pdev_band_arg *arg)
3799 {
3800 	u8 i;
3801 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3802 	struct ath12k_pdev *pdev;
3803 
3804 	for (i = 0; i < soc->num_radios; i++) {
3805 		pdev = &soc->pdevs[i];
3806 		hal_reg_cap = &soc->hal_reg_cap[i];
3807 		arg[i].pdev_id = pdev->pdev_id;
3808 
3809 		switch (pdev->cap.supported_bands) {
3810 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
3811 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3812 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3813 			break;
3814 		case WMI_HOST_WLAN_2GHZ_CAP:
3815 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3816 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3817 			break;
3818 		case WMI_HOST_WLAN_5GHZ_CAP:
3819 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3820 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3821 			break;
3822 		default:
3823 			break;
3824 		}
3825 	}
3826 }
3827 
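/* Translate the host resource config into the little-endian wire
 * format expected by firmware, folding in the flag bits the driver
 * always sets (64-bit BSS channel info, ACK RSSI reporting).
 */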
3828 static void
3829 ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
3830 				struct ath12k_wmi_resource_config_params *wmi_cfg,
3831 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3832 {
3833 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3834 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3835 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3836 	wmi_cfg->num_offload_reorder_buffs =
3837 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3838 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3839 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3840 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3841 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3842 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3843 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3844 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3845 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3846 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3847 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3848 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3849 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3850 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3851 	wmi_cfg->roam_offload_max_ap_profiles =
3852 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3853 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3854 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3855 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3856 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3857 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3858 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3859 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3860 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3861 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3862 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3863 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3864 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3865 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3866 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3867 	wmi_cfg->num_tdls_conn_table_entries =
3868 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3869 	wmi_cfg->beacon_tx_offload_max_vdev =
3870 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3871 	wmi_cfg->num_multicast_filter_entries =
3872 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3873 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3874 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3875 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3876 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3877 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3878 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3879 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3880 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3881 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3882 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3883 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3884 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3885 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3886 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3887 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3888 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
3889 				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
3890 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3891 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3892 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3893 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3894 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3895 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3896 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3897 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3898 	if (ab->hw_params->reoq_lut_support)
3899 		wmi_cfg->host_service_flags |=
3900 			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
3901 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3902 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3903 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3904 }
3905 
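/* Assemble and send WMI_INIT: the resource config, the host memory
 * chunks firmware requested earlier and, unless the preferred hw mode
 * is left unset, the hw mode with its band-to-mac list.
 */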
3906 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3907 				struct ath12k_wmi_init_cmd_arg *arg)
3908 {
3909 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3910 	struct sk_buff *skb;
3911 	struct wmi_init_cmd *cmd;
3912 	struct ath12k_wmi_resource_config_params *cfg;
3913 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3914 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3915 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3916 	struct wmi_tlv *tlv;
3917 	size_t len;
	int ret;
3918 	void *ptr;
3919 	u32 hw_mode_len = 0;
3920 	u16 idx;
3921 
3922 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3923 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3924 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3925 
3926 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3927 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3928 
3929 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3930 	if (!skb)
3931 		return -ENOMEM;
3932 
3933 	cmd = (struct wmi_init_cmd *)skb->data;
3934 
3935 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3936 						 sizeof(*cmd));
3937 
3938 	ptr = skb->data + sizeof(*cmd);
3939 	cfg = ptr;
3940 
3941 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
3942 
3943 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3944 						 sizeof(*cfg));
3945 
3946 	ptr += sizeof(*cfg);
3947 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3948 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3949 
3950 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3951 		host_mem_chunks[idx].tlv_header =
3952 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3953 					   len);
3954 
3955 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3956 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3957 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3958 
3959 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3960 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3961 			   arg->mem_chunks[idx].req_id,
3962 			   (u64)arg->mem_chunks[idx].paddr,
3963 			   arg->mem_chunks[idx].len);
3964 	}
3965 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3966 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3967 
3968 	/* Host memory chunks array TLV; len is zero when num_mem_chunks is zero */
3969 	tlv = ptr;
3970 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3971 	ptr += TLV_HDR_SIZE + len;
3972 
3973 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3974 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3975 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3976 							     sizeof(*hw_mode));
3977 
3978 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3979 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3980 
3981 		ptr += sizeof(*hw_mode);
3982 
3983 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3984 		tlv = ptr;
3985 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3986 
3987 		ptr += TLV_HDR_SIZE;
3988 		len = sizeof(*band_to_mac);
3989 
3990 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3991 			band_to_mac = (void *)ptr;
3992 
3993 			band_to_mac->tlv_header =
3994 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3995 						       len);
3996 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3997 			band_to_mac->start_freq =
3998 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3999 			band_to_mac->end_freq =
4000 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
4001 			ptr += sizeof(*band_to_mac);
4002 		}
4003 	}
4004 
4005 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
4006 	if (ret) {
4007 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
4008 		dev_kfree_skb(skb);
4009 	}
4010 
4011 	return ret;
4012 }
4013 
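/* Configure LRO for a pdev; th_4/th_6 are presumably the IPv4/IPv6
 * flow hash keys and are seeded with random bytes.
 */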
4014 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
4015 			    int pdev_id)
4016 {
4017 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
4018 	struct sk_buff *skb;
4019 	int ret;
4020 
4021 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4022 	if (!skb)
4023 		return -ENOMEM;
4024 
4025 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
4026 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
4027 						 sizeof(*cmd));
4028 
4029 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
4030 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
4031 
4032 	cmd->pdev_id = cpu_to_le32(pdev_id);
4033 
4034 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4035 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4036 
4037 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4038 	if (ret) {
4039 		ath12k_warn(ar->ab,
4040 			    "failed to send lro cfg req wmi cmd\n");
4041 		goto err;
4042 	}
4043 
4044 	return 0;
4045 err:
4046 	dev_kfree_skb(skb);
4047 	return ret;
4048 }
4049 
4050 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4051 {
4052 	unsigned long time_left;
4053 
4054 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4055 						WMI_SERVICE_READY_TIMEOUT_HZ);
4056 	if (!time_left)
4057 		return -ETIMEDOUT;
4058 
4059 	return 0;
4060 }
4061 
4062 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4063 {
4064 	unsigned long time_left;
4065 
4066 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4067 						WMI_SERVICE_READY_TIMEOUT_HZ);
4068 	if (!time_left)
4069 		return -ETIMEDOUT;
4070 
4071 	return 0;
4072 }
4073 
4074 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4075 			   enum wmi_host_hw_mode_config_type mode)
4076 {
4077 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4078 	struct sk_buff *skb;
4079 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4080 	int len;
4081 	int ret;
4082 
4083 	len = sizeof(*cmd);
4084 
4085 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4086 	if (!skb)
4087 		return -ENOMEM;
4088 
4089 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4090 
4091 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4092 						 sizeof(*cmd));
4093 
4094 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
4095 	cmd->hw_mode_index = cpu_to_le32(mode);
4096 
4097 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4098 	if (ret) {
4099 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4100 		dev_kfree_skb(skb);
4101 	}
4102 
4103 	return ret;
4104 }
4105 
4106 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
4107 {
4108 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4109 	struct ath12k_wmi_init_cmd_arg arg = {};
4110 
4111 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
4112 		     ab->wmi_ab.svc_map))
4113 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
4114 
4115 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
4116 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
4117 
4118 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
4119 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
4120 	arg.mem_chunks = wmi_ab->mem_chunks;
4121 
4122 	if (ab->hw_params->single_pdev_only)
4123 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4124 
4125 	arg.num_band_to_mac = ab->num_radios;
4126 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
4127 
4128 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
4129 
4130 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
4131 }
4132 
4133 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
4134 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
4135 {
4136 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
4137 	struct sk_buff *skb;
4138 	int ret;
4139 
4140 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4141 	if (!skb)
4142 		return -ENOMEM;
4143 
4144 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
4145 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
4146 						 sizeof(*cmd));
4147 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
4148 	cmd->scan_count = cpu_to_le32(arg->scan_count);
4149 	cmd->scan_period = cpu_to_le32(arg->scan_period);
4150 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
4151 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
4152 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
4153 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
4154 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
4155 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
4156 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
4157 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
4158 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
4159 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
4160 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
4161 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
4162 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
4163 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
4164 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
4165 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4166 
4167 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4168 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4169 		   arg->vdev_id);
4170 
4171 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4172 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4173 	if (ret) {
4174 		ath12k_warn(ar->ab,
4175 			    "failed to send spectral scan config wmi cmd\n");
4176 		goto err;
4177 	}
4178 
4179 	return 0;
4180 err:
4181 	dev_kfree_skb(skb);
4182 	return ret;
4183 }
4184 
4185 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4186 				    u32 trigger, u32 enable)
4187 {
4188 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4189 	struct sk_buff *skb;
4190 	int ret;
4191 
4192 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4193 	if (!skb)
4194 		return -ENOMEM;
4195 
4196 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4197 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4198 						 sizeof(*cmd));
4199 
4200 	cmd->vdev_id = cpu_to_le32(vdev_id);
4201 	cmd->trigger_cmd = cpu_to_le32(trigger);
4202 	cmd->enable_cmd = cpu_to_le32(enable);
4203 
4204 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4205 		   "WMI spectral enable cmd vdev id 0x%x\n",
4206 		   vdev_id);
4207 
4208 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4209 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4210 	if (ret) {
4211 		ath12k_warn(ar->ab,
4212 			    "failed to send spectral enable wmi cmd\n");
4213 		goto err;
4214 	}
4215 
4216 	return 0;
4217 err:
4218 	dev_kfree_skb(skb);
4219 	return ret;
4220 }
4221 
4222 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4223 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4224 {
4225 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4226 	struct sk_buff *skb;
4227 	int ret;
4228 
4229 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4230 	if (!skb)
4231 		return -ENOMEM;
4232 
4233 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4234 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4235 						 sizeof(*cmd));
4236 
4237 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4238 	cmd->module_id = cpu_to_le32(arg->module_id);
4239 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4240 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4241 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4242 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4243 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4244 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4245 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4246 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4247 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4248 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4249 
4250 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4251 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4252 		   arg->pdev_id);
4253 
4254 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4255 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4256 	if (ret) {
4257 		ath12k_warn(ar->ab,
4258 			    "failed to send dma ring cfg req wmi cmd\n");
4259 		goto err;
4260 	}
4261 
4262 	return 0;
4263 err:
4264 	dev_kfree_skb(skb);
4265 	return ret;
4266 }
4267 
4268 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4269 					  u16 tag, u16 len,
4270 					  const void *ptr, void *data)
4271 {
4272 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4273 
4274 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4275 		return -EPROTO;
4276 
4277 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4278 		return -ENOBUFS;
4279 
4280 	arg->num_buf_entry++;
4281 	return 0;
4282 }
4283 
4284 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4285 					 u16 tag, u16 len,
4286 					 const void *ptr, void *data)
4287 {
4288 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4289 
4290 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4291 		return -EPROTO;
4292 
4293 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4294 		return -ENOBUFS;
4295 
4296 	arg->num_meta++;
4297 
4298 	return 0;
4299 }
4300 
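/* Top-level TLV walker for the DMA buffer release event: capture the
 * fixed params, then iterate the buffer-entry array followed by the
 * spectral meta-data array.
 */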
4301 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4302 				    u16 tag, u16 len,
4303 				    const void *ptr, void *data)
4304 {
4305 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4306 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4307 	u32 pdev_id;
4308 	int ret;
4309 
4310 	switch (tag) {
4311 	case WMI_TAG_DMA_BUF_RELEASE:
4312 		fixed = ptr;
4313 		arg->fixed = *fixed;
4314 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4315 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4316 		break;
4317 	case WMI_TAG_ARRAY_STRUCT:
4318 		if (!arg->buf_entry_done) {
4319 			arg->num_buf_entry = 0;
4320 			arg->buf_entry = ptr;
4321 
4322 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4323 						  ath12k_wmi_dma_buf_entry_parse,
4324 						  arg);
4325 			if (ret) {
4326 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4327 					    ret);
4328 				return ret;
4329 			}
4330 
4331 			arg->buf_entry_done = true;
4332 		} else if (!arg->meta_data_done) {
4333 			arg->num_meta = 0;
4334 			arg->meta_data = ptr;
4335 
4336 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4337 						  ath12k_wmi_dma_buf_meta_parse,
4338 						  arg);
4339 			if (ret) {
4340 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4341 					    ret);
4342 				return ret;
4343 			}
4344 
4345 			arg->meta_data_done = true;
4346 		}
4347 		break;
4348 	default:
4349 		break;
4350 	}
4351 	return 0;
4352 }
4353 
4354 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4355 						       struct sk_buff *skb)
4356 {
4357 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4358 	struct ath12k_dbring_buf_release_event param;
4359 	int ret;
4360 
4361 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4362 				  ath12k_wmi_dma_buf_parse,
4363 				  &arg);
4364 	if (ret) {
4365 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4366 		return;
4367 	}
4368 
4369 	param.fixed = arg.fixed;
4370 	param.buf_entry = arg.buf_entry;
4371 	param.num_buf_entry = arg.num_buf_entry;
4372 	param.meta_data = arg.meta_data;
4373 	param.num_meta = arg.num_meta;
4374 
4375 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4376 	if (ret) {
4377 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4378 		return;
4379 	}
4380 }
4381 
4382 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4383 					 u16 tag, u16 len,
4384 					 const void *ptr, void *data)
4385 {
4386 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4387 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4388 	u32 phy_map = 0;
4389 
4390 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4391 		return -EPROTO;
4392 
4393 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4394 		return -ENOBUFS;
4395 
4396 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4397 				   hw_mode_id);
4398 	svc_rdy_ext->n_hw_mode_caps++;
4399 
4400 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4401 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4402 
4403 	return 0;
4404 }
4405 
4406 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4407 				   u16 len, const void *ptr, void *data)
4408 {
4409 	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
4410 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4411 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4412 	enum wmi_host_hw_mode_config_type mode, pref;
4413 	u32 i;
4414 	int ret;
4415 
4416 	svc_rdy_ext->n_hw_mode_caps = 0;
4417 	svc_rdy_ext->hw_mode_caps = ptr;
4418 
4419 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4420 				  ath12k_wmi_hw_mode_caps_parse,
4421 				  svc_rdy_ext);
4422 	if (ret) {
4423 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4424 		return ret;
4425 	}
4426 
4427 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4428 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4429 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4430 
4431 		if (mode >= WMI_HOST_HW_MODE_MAX)
4432 			continue;
4433 
4434 		pref = soc->wmi_ab.preferred_hw_mode;
4435 
4436 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4437 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4438 			soc->wmi_ab.preferred_hw_mode = mode;
4439 		}
4440 	}
4441 
4442 	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
4443 
4444 	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
4445 		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
4446 
4447 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4448 		return -EINVAL;
4449 
4450 	return 0;
4451 }
4452 
4453 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4454 					 u16 tag, u16 len,
4455 					 const void *ptr, void *data)
4456 {
4457 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4458 
4459 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4460 		return -EPROTO;
4461 
4462 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4463 		return -ENOBUFS;
4464 
4465 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4466 	if (!svc_rdy_ext->n_mac_phy_caps) {
4467 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
4468 						    GFP_ATOMIC);
4469 		if (!svc_rdy_ext->mac_phy_caps)
4470 			return -ENOMEM;
4471 	}
4472 
4473 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4474 	svc_rdy_ext->n_mac_phy_caps++;
4475 	return 0;
4476 }
4477 
4478 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4479 					     u16 tag, u16 len,
4480 					     const void *ptr, void *data)
4481 {
4482 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4483 
4484 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4485 		return -EPROTO;
4486 
4487 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4488 		return -ENOBUFS;
4489 
4490 	svc_rdy_ext->n_ext_hal_reg_caps++;
4491 	return 0;
4492 }
4493 
4494 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4495 				       u16 len, const void *ptr, void *data)
4496 {
4497 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4498 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4499 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4500 	int ret;
4501 	u32 i;
4502 
4503 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4504 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4505 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4506 				  ath12k_wmi_ext_hal_reg_caps_parse,
4507 				  svc_rdy_ext);
4508 	if (ret) {
4509 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4510 		return ret;
4511 	}
4512 
4513 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4514 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4515 						      svc_rdy_ext->soc_hal_reg_caps,
4516 						      svc_rdy_ext->ext_hal_reg_caps, i,
4517 						      &reg_cap);
4518 		if (ret) {
4519 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4520 			return ret;
4521 		}
4522 
4523 		if (reg_cap.phy_id >= MAX_RADIOS) {
4524 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4525 			return -EINVAL;
4526 		}
4527 
4528 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4529 	}
4530 	return 0;
4531 }
4532 
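/* Walk the phy id map of the preferred hw mode and pull each radio's
 * MAC/PHY capabilities into its pdev. Single-pdev targets keep all
 * capabilities in pdev 0.
 */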
4533 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4534 						 u16 len, const void *ptr,
4535 						 void *data)
4536 {
4537 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4538 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4539 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4540 	u32 phy_id_map;
4541 	int pdev_index = 0;
4542 	int ret;
4543 
4544 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4545 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4546 
4547 	soc->num_radios = 0;
4548 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4549 	soc->fw_pdev_count = 0;
4550 
4551 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4552 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4553 							    svc_rdy_ext,
4554 							    hw_mode_id, soc->num_radios,
4555 							    &soc->pdevs[pdev_index]);
4556 		if (ret) {
4557 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4558 				    soc->num_radios);
4559 			return ret;
4560 		}
4561 
4562 		soc->num_radios++;
4563 
4564 		/* For single_pdev_only targets,
4565 		 * save mac_phy capability in the same pdev
4566 		 */
4567 		if (soc->hw_params->single_pdev_only)
4568 			pdev_index = 0;
4569 		else
4570 			pdev_index = soc->num_radios;
4571 
4572 		/* TODO: mac_phy_cap prints */
4573 		phy_id_map >>= 1;
4574 	}
4575 
4576 	if (soc->hw_params->single_pdev_only) {
4577 		soc->num_radios = 1;
4578 		soc->pdevs[0].pdev_id = 0;
4579 	}
4580 
4581 	return 0;
4582 }
4583 
4584 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4585 					  u16 tag, u16 len,
4586 					  const void *ptr, void *data)
4587 {
4588 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4589 
4590 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4591 		return -EPROTO;
4592 
4593 	parse->n_dma_ring_caps++;
4594 	return 0;
4595 }
4596 
4597 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4598 					u32 num_cap)
4599 {
4600 	size_t sz;
4601 	void *ptr;
4602 
4603 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4604 	ptr = kzalloc(sz, GFP_ATOMIC);
4605 	if (!ptr)
4606 		return -ENOMEM;
4607 
4608 	ab->db_caps = ptr;
4609 	ab->num_db_cap = num_cap;
4610 
4611 	return 0;
4612 }
4613 
4614 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4615 {
4616 	kfree(ab->db_caps);
4617 	ab->db_caps = NULL;
4618 	ab->num_db_cap = 0;
4619 }
4620 
4621 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4622 				    u16 len, const void *ptr, void *data)
4623 {
4624 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4625 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4626 	struct ath12k_dbring_cap *dir_buff_caps;
4627 	int ret;
4628 	u32 i;
4629 
4630 	dma_caps_parse->n_dma_ring_caps = 0;
4631 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4632 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4633 				  ath12k_wmi_dma_ring_caps_parse,
4634 				  dma_caps_parse);
4635 	if (ret) {
4636 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4637 		return ret;
4638 	}
4639 
4640 	if (!dma_caps_parse->n_dma_ring_caps)
4641 		return 0;
4642 
4643 	if (ab->num_db_cap) {
4644 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4645 		return 0;
4646 	}
4647 
4648 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4649 	if (ret)
4650 		return ret;
4651 
4652 	dir_buff_caps = ab->db_caps;
4653 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4654 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4655 			ath12k_warn(ab, "Invalid module id %d\n",
4656 				    le32_to_cpu(dma_caps[i].module_id));
4657 			ret = -EINVAL;
4658 			goto free_dir_buff;
4659 		}
4660 
4661 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4662 		dir_buff_caps[i].pdev_id =
4663 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4664 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4665 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4666 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4667 	}
4668 
4669 	return 0;
4670 
4671 free_dir_buff:
4672 	ath12k_wmi_free_dbring_caps(ab);
4673 	return ret;
4674 }
4675 
4676 static void
4677 ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
4678 			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
4679 			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
4680 {
4681 	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
4682 	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
4683 	mac_phy_info->hw_freq_range.low_2ghz_freq =
4684 					__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
4685 	mac_phy_info->hw_freq_range.high_2ghz_freq =
4686 					__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
4687 	mac_phy_info->hw_freq_range.low_5ghz_freq =
4688 					__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
4689 	mac_phy_info->hw_freq_range.high_5ghz_freq =
4690 					__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
4691 }
4692 
4693 static void
4694 ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
4695 				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
4696 {
4697 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
4698 	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
4699 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4700 	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
4701 	u32 hw_mode_id, phy_bit_map;
4702 	u8 hw_idx;
4703 
4704 	mac_phy_info = &svc_ext_info->mac_phy_info[0];
4705 	mac_phy_cap = svc_rdy_ext->mac_phy_caps;
4706 
4707 	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
4708 		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
4709 		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
4710 		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
4711 
4712 		while (phy_bit_map) {
4713 			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
4714 			mac_phy_info->hw_mode_config_type =
4715 					le32_get_bits(hw_mode_cap->hw_mode_config_type,
4716 						      WMI_HW_MODE_CAP_CFG_TYPE);
4717 			ath12k_dbg(ab, ATH12K_DBG_WMI,
4718 				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
4719 				   hw_idx, hw_mode_id,
4720 				   mac_phy_info->hw_mode_config_type,
4721 				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
4722 				   mac_phy_info->hw_freq_range.low_2ghz_freq,
4723 				   mac_phy_info->hw_freq_range.high_2ghz_freq,
4724 				   mac_phy_info->hw_freq_range.low_5ghz_freq,
4725 				   mac_phy_info->hw_freq_range.high_5ghz_freq);
4726 
4727 			mac_phy_cap++;
4728 			mac_phy_info++;
4729 
4730 			phy_bit_map >>= 1;
4731 		}
4732 	}
4733 }
4734 
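/* Tag handler for SERVICE_READY_EXT. The unnamed WMI_TAG_ARRAY_STRUCT
 * arrays arrive in a fixed order, so per-stage "done" flags track
 * which array is currently being parsed.
 */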
4735 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4736 					u16 tag, u16 len,
4737 					const void *ptr, void *data)
4738 {
4739 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4740 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4741 	int ret;
4742 
4743 	switch (tag) {
4744 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4745 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4746 						&svc_rdy_ext->arg);
4747 		if (ret) {
4748 			ath12k_warn(ab, "unable to extract ext params\n");
4749 			return ret;
4750 		}
4751 		break;
4752 
4753 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4754 		svc_rdy_ext->hw_caps = ptr;
4755 		svc_rdy_ext->arg.num_hw_modes =
4756 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4757 		break;
4758 
4759 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4760 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4761 							    svc_rdy_ext);
4762 		if (ret)
4763 			return ret;
4764 		break;
4765 
4766 	case WMI_TAG_ARRAY_STRUCT:
4767 		if (!svc_rdy_ext->hw_mode_done) {
4768 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4769 			if (ret)
4770 				return ret;
4771 
4772 			svc_rdy_ext->hw_mode_done = true;
4773 		} else if (!svc_rdy_ext->mac_phy_done) {
4774 			svc_rdy_ext->n_mac_phy_caps = 0;
4775 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4776 						  ath12k_wmi_mac_phy_caps_parse,
4777 						  svc_rdy_ext);
4778 			if (ret) {
4779 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4780 				return ret;
4781 			}
4782 
4783 			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
4784 
4785 			svc_rdy_ext->mac_phy_done = true;
4786 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4787 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4788 			if (ret)
4789 				return ret;
4790 
4791 			svc_rdy_ext->ext_hal_reg_done = true;
4792 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4793 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4794 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4795 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4796 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4797 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4798 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4799 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4800 						       &svc_rdy_ext->dma_caps_parse);
4801 			if (ret)
4802 				return ret;
4803 
4804 			svc_rdy_ext->dma_ring_cap_done = true;
4805 		}
4806 		break;
4807 
4808 	default:
4809 		break;
4810 	}
4811 	return 0;
4812 }
4813 
4814 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4815 					  struct sk_buff *skb)
4816 {
4817 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4818 	int ret;
4819 
4820 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4821 				  ath12k_wmi_svc_rdy_ext_parse,
4822 				  &svc_rdy_ext);
4823 	if (ret) {
4824 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4825 		goto err;
4826 	}
4827 
4828 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4829 		complete(&ab->wmi_ab.service_ready);
4830 
4831 	kfree(svc_rdy_ext.mac_phy_caps);
4832 	return 0;
4833 
4834 err:
4835 	kfree(svc_rdy_ext.mac_phy_caps);
4836 	ath12k_wmi_free_dbring_caps(ab);
4837 	return ret;
4838 }
4839 
4840 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4841 				      const void *ptr,
4842 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4843 {
4844 	const struct wmi_service_ready_ext2_event *ev = ptr;
4845 
4846 	if (!ev)
4847 		return -EINVAL;
4848 
4849 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4850 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4851 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4852 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4853 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4854 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4855 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4856 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4857 	return 0;
4858 }
4859 
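/* Populate one band's EHT capabilities from the firmware-reported
 * MAC/PHY info. For the 6 GHz band the 320 MHz support bit may already
 * have been set (e.g. from a WMI_HOST_HW_MODE_SINGLE entry), so it is
 * saved here and OR'ed back in after eht_cap_phy_info is overwritten.
 */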
4860 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4861 				      const __le32 cap_mac_info[],
4862 				      const __le32 cap_phy_info[],
4863 				      const __le32 supp_mcs[],
4864 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4865 				      __le32 cap_info_internal)
4866 {
4867 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4868 	u32 support_320mhz;
4869 	u8 i;
4870 
4871 	if (band == NL80211_BAND_6GHZ)
4872 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4873 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4874 
4875 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4876 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4877 
4878 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4879 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4880 
4881 	if (band == NL80211_BAND_6GHZ)
4882 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4883 
4884 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4885 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4886 	if (band != NL80211_BAND_2GHZ) {
4887 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4888 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4889 	}
4890 
4891 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4892 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4893 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4894 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4895 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4896 
4897 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4898 }
4899 
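/* The caps ext TLV carries only 2 GHz and 5 GHz EHT capability sets,
 * so the 5 GHz set is applied to both NL80211_BAND_5GHZ and
 * NL80211_BAND_6GHZ. On single-pdev chips a WMI_HOST_HW_MODE_SINGLE
 * entry contributes only the 320 MHz support bit for the 6 GHz band.
 */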
4900 static int
4901 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4902 				      const struct ath12k_wmi_caps_ext_params *caps,
4903 				      struct ath12k_pdev *pdev)
4904 {
4905 	struct ath12k_band_cap *cap_band;
4906 	u32 bands, support_320mhz;
4907 	int i;
4908 
4909 	if (ab->hw_params->single_pdev_only) {
4910 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4911 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4912 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4913 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4914 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4915 			return 0;
4916 		}
4917 
4918 		for (i = 0; i < ab->fw_pdev_count; i++) {
4919 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4920 
4921 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4922 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4923 				bands = fw_pdev->supported_bands;
4924 				break;
4925 			}
4926 		}
4927 
4928 		if (i == ab->fw_pdev_count)
4929 			return -EINVAL;
4930 	} else {
4931 		bands = pdev->cap.supported_bands;
4932 	}
4933 
4934 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
4935 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4936 					  caps->eht_cap_mac_info_2ghz,
4937 					  caps->eht_cap_phy_info_2ghz,
4938 					  caps->eht_supp_mcs_ext_2ghz,
4939 					  &caps->eht_ppet_2ghz,
4940 					  caps->eht_cap_info_internal);
4941 	}
4942 
4943 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
4944 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4945 					  caps->eht_cap_mac_info_5ghz,
4946 					  caps->eht_cap_phy_info_5ghz,
4947 					  caps->eht_supp_mcs_ext_5ghz,
4948 					  &caps->eht_ppet_5ghz,
4949 					  caps->eht_cap_info_internal);
4950 
4951 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4952 					  caps->eht_cap_mac_info_5ghz,
4953 					  caps->eht_cap_phy_info_5ghz,
4954 					  caps->eht_supp_mcs_ext_5ghz,
4955 					  &caps->eht_ppet_5ghz,
4956 					  caps->eht_cap_info_internal);
4957 	}
4958 
4959 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4960 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4961 
4962 	return 0;
4963 }
4964 
4965 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4966 					   u16 len, const void *ptr,
4967 					   void *data)
4968 {
4969 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4970 	int i = 0, ret;
4971 
4972 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4973 		return -EPROTO;
4974 
4975 	if (ab->hw_params->single_pdev_only) {
4976 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4977 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4978 			return 0;
4979 	} else {
4980 		for (i = 0; i < ab->num_radios; i++) {
4981 			if (ab->pdevs[i].pdev_id ==
4982 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4983 				break;
4984 		}
4985 
4986 		if (i == ab->num_radios)
4987 			return -EINVAL;
4988 	}
4989 
4990 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4991 	if (ret) {
4992 		ath12k_warn(ab,
4993 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4994 			    ab->pdevs[i].pdev_id, ret);
4995 		return ret;
4996 	}
4997 
4998 	return 0;
4999 }
5000 
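/* Record the usable frequency range of one MAC for the given hw mode,
 * clamping the firmware-reported limits to the driver's own bounds. A
 * high limit of 0 from firmware is treated as "no upper bound reported"
 * and replaced with the driver maximum; note the 5 GHz slot extends up
 * to ATH12K_MAX_6GHZ_FREQ so that it covers 6 GHz channels too. For
 * example, a reported 5 GHz range of [5180, 0] is stored as
 * [5180, ATH12K_MAX_6GHZ_FREQ].
 */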
5001 static void
5002 ath12k_wmi_update_freq_info(struct ath12k_base *ab,
5003 			    struct ath12k_svc_ext_mac_phy_info *mac_cap,
5004 			    enum ath12k_hw_mode mode,
5005 			    u32 phy_id)
5006 {
5007 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5008 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5009 
5010 	mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
5011 
5012 	if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
5013 		mac_range->low_2ghz_freq = max_t(u32,
5014 						 mac_cap->hw_freq_range.low_2ghz_freq,
5015 						 ATH12K_MIN_2GHZ_FREQ);
5016 		mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
5017 					    min_t(u32,
5018 						  mac_cap->hw_freq_range.high_2ghz_freq,
5019 						  ATH12K_MAX_2GHZ_FREQ) :
5020 					    ATH12K_MAX_2GHZ_FREQ;
5021 	}
5022 
5023 	if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
5024 		mac_range->low_5ghz_freq = max_t(u32,
5025 						 mac_cap->hw_freq_range.low_5ghz_freq,
5026 						 ATH12K_MIN_5GHZ_FREQ);
5027 		mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
5028 					    min_t(u32,
5029 						  mac_cap->hw_freq_range.high_5ghz_freq,
5030 						  ATH12K_MAX_6GHZ_FREQ) :
5031 					    ATH12K_MAX_6GHZ_FREQ;
5032 	}
5033 }
5034 
5035 static bool
5036 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
5037 				 enum ath12k_hw_mode hwmode)
5038 {
5039 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5040 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5041 	u8 phy_id;
5042 
5043 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5044 		mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
5045 		/* modify SBS/DBS range only when both phys for DBS are filled */
5046 		if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
5047 			return false;
5048 	}
5049 
5050 	return true;
5051 }
5052 
5053 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
5054 {
5055 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5056 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5057 	u8 phy_id;
5058 
5059 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
5060 	/* Reset 5 GHz range for shared mac for DBS */
5061 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5062 		if (mac_range[phy_id].low_2ghz_freq &&
5063 		    mac_range[phy_id].low_5ghz_freq) {
5064 			mac_range[phy_id].low_5ghz_freq = 0;
5065 			mac_range[phy_id].high_5ghz_freq = 0;
5066 		}
5067 	}
5068 }
5069 
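/* Helpers returning the highest/lowest populated 5 GHz frequency
 * across all MACs in @range, falling back to the driver-wide maximum
 * (or minimum) when no 5 GHz range has been filled in.
 */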
5070 static u32
5071 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5072 {
5073 	u32 highest_freq = 0;
5074 	u8 phy_id;
5075 
5076 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5077 		if (range[phy_id].high_5ghz_freq > highest_freq)
5078 			highest_freq = range[phy_id].high_5ghz_freq;
5079 	}
5080 
5081 	return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
5082 }
5083 
5084 static u32
5085 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5086 {
5087 	u32 lowest_freq = 0;
5088 	u8 phy_id;
5089 
5090 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5091 		if (range[phy_id].low_5ghz_freq &&
5092 		    (!lowest_freq || range[phy_id].low_5ghz_freq < lowest_freq))
5093 			lowest_freq = range[phy_id].low_5ghz_freq;
5094 	}
5095 
5096 	return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
5097 }
5098 
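/* Build the SBS "upper share" table: the shared MAC (the one that also
 * owns a 2.4 GHz range) serves the 5 GHz spectrum above the cutoff and
 * the other MAC serves everything below it. As a worked example,
 * assuming a firmware cutoff of 5650 MHz and a populated 5 GHz range
 * of [5180, 7115]: the shared MAC gets [5660, 7115] while the
 * non-shared MAC gets [5180, 5650], the 10 MHz offset keeping the two
 * sub-ranges disjoint.
 */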
5099 static void
5100 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
5101 				     u16 sbs_range_sep,
5102 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5103 {
5104 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5105 	struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
5106 	u8 phy_id;
5107 
5108 	upper_sbs_freq_range =
5109 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
5110 
5111 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5112 		upper_sbs_freq_range[phy_id].low_2ghz_freq =
5113 						ref_freq[phy_id].low_2ghz_freq;
5114 		upper_sbs_freq_range[phy_id].high_2ghz_freq =
5115 						ref_freq[phy_id].high_2ghz_freq;
5116 
5117 		/* update for shared mac */
5118 		if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
5119 			upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5120 			upper_sbs_freq_range[phy_id].high_5ghz_freq =
5121 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5122 		} else {
5123 			upper_sbs_freq_range[phy_id].low_5ghz_freq =
5124 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5125 			upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5126 		}
5127 	}
5128 }
5129 
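/* Mirror image of the upper-share case: here the shared MAC serves the
 * 5 GHz spectrum below the cutoff while the non-shared MAC serves the
 * range above it, again with a 10 MHz gap in between.
 */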
5130 static void
5131 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
5132 				     u16 sbs_range_sep,
5133 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5134 {
5135 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5136 	struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
5137 	u8 phy_id;
5138 
5139 	lower_sbs_freq_range =
5140 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
5141 
5142 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5143 		lower_sbs_freq_range[phy_id].low_2ghz_freq =
5144 						ref_freq[phy_id].low_2ghz_freq;
5145 		lower_sbs_freq_range[phy_id].high_2ghz_freq =
5146 						ref_freq[phy_id].high_2ghz_freq;
5147 
5148 		/* update for shared mac */
5149 		if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
5150 			lower_sbs_freq_range[phy_id].low_5ghz_freq =
5151 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5152 			lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5153 		} else {
5154 			lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5155 			lower_sbs_freq_range[phy_id].high_5ghz_freq =
5156 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5157 		}
5158 	}
5159 }
5160 
5161 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
5162 {
5163 	static const char * const mode_str[] = {
5164 		[ATH12K_HW_MODE_SMM] = "SMM",
5165 		[ATH12K_HW_MODE_DBS] = "DBS",
5166 		[ATH12K_HW_MODE_SBS] = "SBS",
5167 		[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
5168 		[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
5169 	};
5170 
5171 	if (hw_mode >= ARRAY_SIZE(mode_str))
5172 		return "Unknown";
5173 
5174 	return mode_str[hw_mode];
5175 }
5176 
5177 static void
5178 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
5179 				   struct ath12k_hw_mode_freq_range_arg *freq_range,
5180 				   enum ath12k_hw_mode hw_mode)
5181 {
5182 	u8 i;
5183 
5184 	for (i = 0; i < MAX_RADIOS; i++)
5185 		if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
5186 			ath12k_dbg(ab, ATH12K_DBG_WMI,
5187 				   "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5188 				   ath12k_wmi_hw_mode_to_str(hw_mode),
5189 				   hw_mode, i,
5190 				   freq_range[i].low_2ghz_freq,
5191 				   freq_range[i].high_2ghz_freq,
5192 				   freq_range[i].low_5ghz_freq,
5193 				   freq_range[i].high_5ghz_freq);
5194 }
5195 
5196 static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
5197 {
5198 	struct ath12k_hw_mode_freq_range_arg *freq_range;
5199 	u8 i;
5200 
5201 	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
5202 		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
5203 		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
5204 	}
5205 }
5206 
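/* When firmware does not provide a lower-band cutoff it reports one
 * shared MAC (with both 2.4 and 5 GHz ranges) and one non-shared MAC
 * (5 GHz only). Trim the non-shared MAC's 5 GHz range so that it does
 * not overlap the shared MAC's range, leaving a 10 MHz gap between the
 * two sub-ranges.
 */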
5207 static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
5208 {
5209 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5210 	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
5211 	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
5212 	u8 shared_phy_id;
5213 
5214 	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
5215 
5216 	/* if the SBS mac range has both 2.4 and 5 GHz ranges, i.e. it is
5217 	 * the shared phy_id, keep the range as it is in SBS
5218 	 */
5219 	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
5220 		return 0;
5221 
5222 	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
5223 		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4 GHz");
5224 		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
5225 		return -EINVAL;
5226 	}
5227 
5228 	non_shared_range = sbs_mac_range;
5229 	/* if SBS mac range has only 5 GHz then it's the non-shared phy, so
5230 	 * modify the range as per the shared mac.
5231 	 */
5232 	shared_phy_id = phy_id ? 0 : 1;
5233 	shared_mac_range =
5234 		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
5235 
5236 	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
5237 		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
5238 		/* If the shared mac lower 5 GHz frequency is greater than
5239 		 * non-shared mac lower 5 GHz frequency then the shared mac has
5240 		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
5241 		 * freq should be less than the shared mac's low 5 GHz freq.
5242 		 */
5243 		if (non_shared_range->high_5ghz_freq >=
5244 		    shared_mac_range->low_5ghz_freq)
5245 			non_shared_range->high_5ghz_freq =
5246 				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
5247 				      non_shared_range->low_5ghz_freq);
5248 	} else if (shared_mac_range->high_5ghz_freq <
5249 		   non_shared_range->high_5ghz_freq) {
5250 		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
5251 		/* If the shared mac high 5 GHz frequency is less than
5252 		 * non-shared mac high 5 GHz frequency then the shared mac has
5253 		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
5254 		 * freq should be greater than the shared mac's high 5 GHz freq.
5255 		 */
5256 		if (shared_mac_range->high_5ghz_freq >=
5257 		    non_shared_range->low_5ghz_freq)
5258 			non_shared_range->low_5ghz_freq =
5259 				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
5260 				      non_shared_range->high_5ghz_freq);
5261 	} else {
5262 		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
5263 		return -EINVAL;
5264 	}
5265 
5266 	return 0;
5267 }
5268 
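/* Finalize the SBS frequency tables in one of two ways: if firmware
 * provided sbs_lower_band_end_freq, split the 5 GHz range at that
 * cutoff into dedicated lower-share and upper-share tables and clear
 * the plain SBS table; otherwise trim the per-phy SBS ranges in place
 * via ath12k_wmi_modify_sbs_freq().
 */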
5269 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
5270 {
5271 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5272 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5273 	u16 sbs_range_sep;
5274 	u8 phy_id;
5275 	int ret;
5276 
5277 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
5278 
5279 	/* If sbs_lower_band_end_freq has a value, then the frequency range
5280 	 * will be split using that value.
5281 	 */
5282 	sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
5283 	if (sbs_range_sep) {
5284 		ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
5285 						     mac_range);
5286 		ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
5287 						     mac_range);
5288 		/* Hardware specifies the range boundary with sbs_range_sep
5289 		 * (i.e. the boundary between 5 GHz high and 5 GHz low), so
5290 		 * reset the original range to make sure it will not get used.
5291 		 */
5292 		memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5293 		return;
5294 	}
5295 
5296 	/* If sbs_lower_band_end_freq is not set, firmware will send one shared
5297 	 * mac range and one non-shared mac range, so update those frequencies.
5298 	 */
5299 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5300 		ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
5301 		if (ret) {
5302 			memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5303 			break;
5304 		}
5305 	}
5306 }
5307 
5308 static void
5309 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
5310 				enum wmi_host_hw_mode_config_type hw_config_type,
5311 				u32 phy_id,
5312 				struct ath12k_svc_ext_mac_phy_info *mac_cap)
5313 {
5314 	if (phy_id >= MAX_RADIOS) {
5315 		ath12k_err(ab, "more than two MACs are not supported: %d", phy_id);
5316 		return;
5317 	}
5318 
5319 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5320 		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5321 		   hw_config_type, phy_id, mac_cap->supported_bands,
5322 		   ab->wmi_ab.sbs_lower_band_end_freq,
5323 		   mac_cap->hw_freq_range.low_2ghz_freq,
5324 		   mac_cap->hw_freq_range.high_2ghz_freq,
5325 		   mac_cap->hw_freq_range.low_5ghz_freq,
5326 		   mac_cap->hw_freq_range.high_5ghz_freq);
5327 
5328 	switch (hw_config_type) {
5329 	case WMI_HOST_HW_MODE_SINGLE:
5330 		if (phy_id) {
5331 			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
5332 			break;
5333 		}
5334 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
5335 		break;
5336 
5337 	case WMI_HOST_HW_MODE_DBS:
5338 		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5339 			ath12k_wmi_update_freq_info(ab, mac_cap,
5340 						    ATH12K_HW_MODE_DBS, phy_id);
5341 		break;
5342 	case WMI_HOST_HW_MODE_DBS_SBS:
5343 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
5344 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
5345 		if (ab->wmi_ab.sbs_lower_band_end_freq ||
5346 		    mac_cap->hw_freq_range.low_5ghz_freq ||
5347 		    mac_cap->hw_freq_range.low_2ghz_freq)
5348 			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
5349 						    phy_id);
5350 
5351 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5352 			ath12k_wmi_update_dbs_freq_info(ab);
5353 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5354 			ath12k_wmi_update_sbs_freq_info(ab);
5355 		break;
5356 	case WMI_HOST_HW_MODE_SBS:
5357 	case WMI_HOST_HW_MODE_SBS_PASSIVE:
5358 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
5359 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5360 			ath12k_wmi_update_sbs_freq_info(ab);
5361 
5362 		break;
5363 	default:
5364 		break;
5365 	}
5366 }
5367 
5368 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
5369 {
5370 	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
5371 	    (ab->wmi_ab.sbs_lower_band_end_freq &&
5372 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
5373 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
5374 		return true;
5375 
5376 	return false;
5377 }
5378 
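/* Rebuild the per-hw-mode frequency range tables from the mac/phy info
 * saved during service ready ext parsing. Single-MAC modes consume one
 * mac_phy_info entry and dual-MAC (DBS/SBS) modes consume two
 * consecutive entries; the DBS/SBS support flags are derived from the
 * modes encountered while walking the list.
 */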
5379 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
5380 {
5381 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
5382 	struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
5383 	enum wmi_host_hw_mode_config_type hw_config_type;
5384 	struct ath12k_svc_ext_mac_phy_info *tmp;
5385 	bool dbs_mode = false, sbs_mode = false;
5386 	u32 i, j = 0;
5387 
5388 	if (!svc_ext_info->num_hw_modes) {
5389 		ath12k_err(ab, "invalid number of hw modes");
5390 		return -EINVAL;
5391 	}
5392 
5393 	ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
5394 		   svc_ext_info->num_hw_modes);
5395 
5396 	memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
5397 
5398 	for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
5399 		if (j >= ATH12K_MAX_MAC_PHY_CAP)
5400 			return -EINVAL;
5401 
5402 		/* Update for MAC0 */
5403 		tmp = &svc_ext_info->mac_phy_info[j++];
5404 		hw_config_type = tmp->hw_mode_config_type;
5405 		ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
5406 
5407 		/* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
5408 		if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5409 		    hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5410 		    hw_config_type == WMI_HOST_HW_MODE_SBS ||
5411 		    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
5412 			if (j >= ATH12K_MAX_MAC_PHY_CAP)
5413 				return -EINVAL;
5414 			/* Update for MAC1 */
5415 			tmp = &svc_ext_info->mac_phy_info[j++];
5416 			ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
5417 							tmp->phy_id, tmp);
5418 
5419 			if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5420 			    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
5421 				dbs_mode = true;
5422 
5423 			if (ath12k_wmi_sbs_range_present(ab) &&
5424 			    (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5425 			     hw_config_type == WMI_HOST_HW_MODE_SBS ||
5426 			     hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
5427 				sbs_mode = true;
5428 		}
5429 	}
5430 
5431 	info->support_dbs = dbs_mode;
5432 	info->support_sbs = sbs_mode;
5433 
5434 	ath12k_wmi_dump_freq_range(ab);
5435 
5436 	return 0;
5437 }
5438 
5439 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
5440 					 u16 tag, u16 len,
5441 					 const void *ptr, void *data)
5442 {
5443 	const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
5444 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
5445 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
5446 	int ret;
5447 
5448 	switch (tag) {
5449 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
5450 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
5451 						 &parse->arg);
5452 		if (ret) {
5453 			ath12k_warn(ab,
5454 				    "failed to extract wmi service ready ext2 parameters: %d\n",
5455 				    ret);
5456 			return ret;
5457 		}
5458 		break;
5459 
5460 	case WMI_TAG_ARRAY_STRUCT:
5461 		if (!parse->dma_ring_cap_done) {
5462 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
5463 						       &parse->dma_caps_parse);
5464 			if (ret)
5465 				return ret;
5466 
5467 			parse->dma_ring_cap_done = true;
5468 		} else if (!parse->spectral_bin_scaling_done) {
5469 			/* TODO: This is a place-holder as WMI tag for
5470 			 * spectral scaling is before
5471 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
5472 			 */
5473 			parse->spectral_bin_scaling_done = true;
5474 		} else if (!parse->mac_phy_caps_ext_done) {
5475 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
5476 						  ath12k_wmi_tlv_mac_phy_caps_ext,
5477 						  parse);
5478 			if (ret) {
5479 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
5480 					    ret);
5481 				return ret;
5482 			}
5483 
5484 			parse->mac_phy_caps_ext_done = true;
5485 		} else if (!parse->hal_reg_caps_ext2_done) {
5486 			parse->hal_reg_caps_ext2_done = true;
5487 		} else if (!parse->scan_radio_caps_ext2_done) {
5488 			parse->scan_radio_caps_ext2_done = true;
5489 		} else if (!parse->twt_caps_done) {
5490 			parse->twt_caps_done = true;
5491 		} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
5492 			parse->htt_msdu_idx_to_qtype_map_done = true;
5493 		} else if (!parse->dbs_or_sbs_cap_ext_done) {
5494 			dbs_or_sbs_caps = ptr;
5495 			ab->wmi_ab.sbs_lower_band_end_freq =
5496 				__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
5497 
5498 			ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
5499 				   ab->wmi_ab.sbs_lower_band_end_freq);
5500 
5501 			ret = ath12k_wmi_update_hw_mode_list(ab);
5502 			if (ret) {
5503 				ath12k_warn(ab, "failed to update hw mode list: %d\n",
5504 					    ret);
5505 				return ret;
5506 			}
5507 
5508 			parse->dbs_or_sbs_cap_ext_done = true;
5509 		}
5510 
5511 		break;
5512 	default:
5513 		break;
5514 	}
5515 
5516 	return 0;
5517 }
5518 
5519 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
5520 					   struct sk_buff *skb)
5521 {
5522 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
5523 	int ret;
5524 
5525 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5526 				  ath12k_wmi_svc_rdy_ext2_parse,
5527 				  &svc_rdy_ext2);
5528 	if (ret) {
5529 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
5530 		goto err;
5531 	}
5532 
5533 	complete(&ab->wmi_ab.service_ready);
5534 
5535 	return 0;
5536 
5537 err:
5538 	ath12k_wmi_free_dbring_caps(ab);
5539 	return ret;
5540 }
5541 
5542 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5543 					   struct wmi_vdev_start_resp_event *vdev_rsp)
5544 {
5545 	const void **tb;
5546 	const struct wmi_vdev_start_resp_event *ev;
5547 	int ret;
5548 
5549 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5550 	if (IS_ERR(tb)) {
5551 		ret = PTR_ERR(tb);
5552 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5553 		return ret;
5554 	}
5555 
5556 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
5557 	if (!ev) {
5558 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
5559 		kfree(tb);
5560 		return -EPROTO;
5561 	}
5562 
5563 	*vdev_rsp = *ev;
5564 
5565 	kfree(tb);
5566 	return 0;
5567 }
5568 
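/* Convert an array of WMI extended reg rules into the driver's
 * ath12k_reg_rule format, unpacking the bitfield-encoded frequency,
 * bandwidth, power and PSD parameters. GFP_ATOMIC is used since this
 * runs from the WMI event path.
 */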
5569 static struct ath12k_reg_rule
5570 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5571 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5572 {
5573 	struct ath12k_reg_rule *reg_rule_ptr;
5574 	u32 count;
5575 
5576 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
5577 			       GFP_ATOMIC);
5578 
5579 	if (!reg_rule_ptr)
5580 		return NULL;
5581 
5582 	for (count = 0; count < num_reg_rules; count++) {
5583 		reg_rule_ptr[count].start_freq =
5584 			le32_get_bits(wmi_reg_rule[count].freq_info,
5585 				      REG_RULE_START_FREQ);
5586 		reg_rule_ptr[count].end_freq =
5587 			le32_get_bits(wmi_reg_rule[count].freq_info,
5588 				      REG_RULE_END_FREQ);
5589 		reg_rule_ptr[count].max_bw =
5590 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5591 				      REG_RULE_MAX_BW);
5592 		reg_rule_ptr[count].reg_power =
5593 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5594 				      REG_RULE_REG_PWR);
5595 		reg_rule_ptr[count].ant_gain =
5596 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5597 				      REG_RULE_ANT_GAIN);
5598 		reg_rule_ptr[count].flags =
5599 			le32_get_bits(wmi_reg_rule[count].flag_info,
5600 				      REG_RULE_FLAGS);
5601 		reg_rule_ptr[count].psd_flag =
5602 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5603 				      REG_RULE_PSD_INFO);
5604 		reg_rule_ptr[count].psd_eirp =
5605 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5606 				      REG_RULE_PSD_EIRP);
5607 	}
5608 
5609 	return reg_rule_ptr;
5610 }
5611 
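/* Count how many rules in a 5 GHz rule list have a start frequency
 * that actually lies in the 6 GHz range; see the comment at the call
 * site for why such rules must be skipped.
 */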
5612 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5613 					    u32 num_reg_rules)
5614 {
5615 	u8 num_invalid_5ghz_rules = 0;
5616 	u32 count, start_freq;
5617 
5618 	for (count = 0; count < num_reg_rules; count++) {
5619 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5620 
5621 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5622 			num_invalid_5ghz_rules++;
5623 	}
5624 
5625 	return num_invalid_5ghz_rules;
5626 }
5627 
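/* Parse a WMI_REG_CHAN_LIST_CC_EXT_EVENT. After the rule counts are
 * validated against MAX_REG_RULES/MAX_6GHZ_REG_RULES and the fixed
 * fields copied, the trailing rule array is consumed strictly in
 * order: 2 GHz rules, 5 GHz rules (plus any invalid 6 GHz entries to
 * skip over), 6 GHz AP rules per power type, and finally 6 GHz client
 * rules per power and client type, advancing ext_wmi_reg_rule past
 * each group.
 */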
5628 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5629 						   struct sk_buff *skb,
5630 						   struct ath12k_reg_info *reg_info)
5631 {
5632 	const void **tb;
5633 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5634 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5635 	u32 num_2g_reg_rules, num_5g_reg_rules;
5636 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5637 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5638 	u8 num_invalid_5ghz_ext_rules;
5639 	u32 total_reg_rules = 0;
5640 	int ret, i, j;
5641 
5642 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5643 
5644 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5645 	if (IS_ERR(tb)) {
5646 		ret = PTR_ERR(tb);
5647 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5648 		return ret;
5649 	}
5650 
5651 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5652 	if (!ev) {
5653 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5654 		kfree(tb);
5655 		return -EPROTO;
5656 	}
5657 
5658 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5659 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5660 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5661 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5662 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5663 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5664 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5665 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5666 
5667 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5668 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5669 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5670 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5671 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5672 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5673 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5674 	}
5675 
5676 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5677 	total_reg_rules += num_2g_reg_rules;
5678 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5679 	total_reg_rules += num_5g_reg_rules;
5680 
5681 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
5682 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
5683 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
5684 		kfree(tb);
5685 		return -EINVAL;
5686 	}
5687 
5688 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5689 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
5690 
5691 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
5692 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
5693 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
5694 			kfree(tb);
5695 			return -EINVAL;
5696 		}
5697 
5698 		total_reg_rules += num_6g_reg_rules_ap[i];
5699 	}
5700 
5701 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5702 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5703 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5704 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5705 
5706 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5707 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5708 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5709 
5710 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5711 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5712 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5713 
5714 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
5715 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
5716 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
5717 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client type %d\n",
5718 				    i);
5719 			kfree(tb);
5720 			return -EINVAL;
5721 		}
5722 	}
5723 
5724 	if (!total_reg_rules) {
5725 		ath12k_warn(ab, "No reg rules available\n");
5726 		kfree(tb);
5727 		return -EINVAL;
5728 	}
5729 
5730 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5731 
5732 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5733 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5734 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5735 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5736 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5737 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5738 
5739 	switch (le32_to_cpu(ev->status_code)) {
5740 	case WMI_REG_SET_CC_STATUS_PASS:
5741 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5742 		break;
5743 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5744 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5745 		break;
5746 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5747 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5748 		break;
5749 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5750 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5751 		break;
5752 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5753 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5754 		break;
5755 	case WMI_REG_SET_CC_STATUS_FAIL:
5756 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5757 		break;
5758 	}
5759 
5760 	reg_info->is_ext_reg_event = true;
5761 
5762 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5763 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5764 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5765 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5766 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5767 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5768 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5769 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5770 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5771 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5772 
5773 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5774 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5775 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5776 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5777 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5778 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5779 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5780 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5781 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5782 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5783 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5784 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5785 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5786 	}
5787 
5788 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5789 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5790 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5791 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5792 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5793 		   reg_info->phybitmap);
5794 
5795 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5796 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5797 		   num_2g_reg_rules, num_5g_reg_rules);
5798 
5799 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5800 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5801 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5802 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5803 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5804 
5805 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5806 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5807 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5808 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5809 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5810 
5811 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5812 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5813 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5814 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5815 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5816 
5817 	ext_wmi_reg_rule =
5818 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5819 			+ sizeof(*ev)
5820 			+ sizeof(struct wmi_tlv));
5821 
5822 	if (num_2g_reg_rules) {
5823 		reg_info->reg_rules_2g_ptr =
5824 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5825 						      ext_wmi_reg_rule);
5826 
5827 		if (!reg_info->reg_rules_2g_ptr) {
5828 			kfree(tb);
5829 			ath12k_warn(ab, "Unable to allocate memory for 2g rules\n");
5830 			return -ENOMEM;
5831 		}
5832 	}
5833 
5834 	ext_wmi_reg_rule += num_2g_reg_rules;
5835 
5836 	/* For a few countries, firmware might include 6 GHz reg rules in
5837 	 * the 5 GHz rule list along with separate 6 GHz rules.
5838 	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
5839 	 * lists causes the intersect check to be true, so the same rules
5840 	 * would be shown multiple times in the iw cmd output.
5841 	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
5842 	 */
5843 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5844 								       num_5g_reg_rules);
5845 
5846 	if (num_invalid_5ghz_ext_rules) {
5847 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5848 			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
5849 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5850 			   num_invalid_5ghz_ext_rules);
5851 
5852 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
5853 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5854 	}
5855 
5856 	if (num_5g_reg_rules) {
5857 		reg_info->reg_rules_5g_ptr =
5858 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5859 						      ext_wmi_reg_rule);
5860 
5861 		if (!reg_info->reg_rules_5g_ptr) {
5862 			kfree(tb);
5863 			ath12k_warn(ab, "Unable to allocate memory for 5g rules\n");
5864 			return -ENOMEM;
5865 		}
5866 	}
5867 
5868 	/* We have adjusted the number of 5 GHz reg rules above, but the rule
5869 	 * pointer still needs to be advanced past all of them in
5870 	 * ext_wmi_reg_rule.
5871 	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
5872 	 */
5873 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5874 
5875 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5876 		reg_info->reg_rules_6g_ap_ptr[i] =
5877 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5878 						      ext_wmi_reg_rule);
5879 
5880 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5881 			kfree(tb);
5882 			ath12k_warn(ab, "Unable to allocate memory for 6g ap rules\n");
5883 			return -ENOMEM;
5884 		}
5885 
5886 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5887 	}
5888 
5889 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5890 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5891 			reg_info->reg_rules_6g_client_ptr[j][i] =
5892 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5893 							      ext_wmi_reg_rule);
5894 
5895 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5896 				kfree(tb);
5897 				ath12k_warn(ab, "Unable to allocate memory for 6g client rules\n");
5898 				return -ENOMEM;
5899 			}
5900 
5901 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5902 		}
5903 	}
5904 
5905 	reg_info->client_type = le32_to_cpu(ev->client_type);
5906 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5907 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5908 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5909 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5910 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5911 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5912 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5913 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5914 
5915 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5916 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5917 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5918 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5919 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5920 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5921 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5922 	}
5923 
5924 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5925 
5926 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5927 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5928 
5929 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5930 
5931 	kfree(tb);
5932 	return 0;
5933 }
5934 
5935 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5936 					struct wmi_peer_delete_resp_event *peer_del_resp)
5937 {
5938 	const void **tb;
5939 	const struct wmi_peer_delete_resp_event *ev;
5940 	int ret;
5941 
5942 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5943 	if (IS_ERR(tb)) {
5944 		ret = PTR_ERR(tb);
5945 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5946 		return ret;
5947 	}
5948 
5949 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5950 	if (!ev) {
5951 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5952 		kfree(tb);
5953 		return -EPROTO;
5954 	}
5955 
5956 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5957 
5958 	peer_del_resp->vdev_id = ev->vdev_id;
5959 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5960 			ev->peer_macaddr.addr);
5961 
5962 	kfree(tb);
5963 	return 0;
5964 }
5965 
5966 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5967 					struct sk_buff *skb,
5968 					u32 *vdev_id)
5969 {
5970 	const void **tb;
5971 	const struct wmi_vdev_delete_resp_event *ev;
5972 	int ret;
5973 
5974 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5975 	if (IS_ERR(tb)) {
5976 		ret = PTR_ERR(tb);
5977 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5978 		return ret;
5979 	}
5980 
5981 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5982 	if (!ev) {
5983 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5984 		kfree(tb);
5985 		return -EPROTO;
5986 	}
5987 
5988 	*vdev_id = le32_to_cpu(ev->vdev_id);
5989 
5990 	kfree(tb);
5991 	return 0;
5992 }
5993 
5994 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5995 					struct sk_buff *skb,
5996 					u32 *vdev_id, u32 *tx_status)
5997 {
5998 	const void **tb;
5999 	const struct wmi_bcn_tx_status_event *ev;
6000 	int ret;
6001 
6002 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6003 	if (IS_ERR(tb)) {
6004 		ret = PTR_ERR(tb);
6005 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6006 		return ret;
6007 	}
6008 
6009 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
6010 	if (!ev) {
6011 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
6012 		kfree(tb);
6013 		return -EPROTO;
6014 	}
6015 
6016 	*vdev_id = le32_to_cpu(ev->vdev_id);
6017 	*tx_status = le32_to_cpu(ev->tx_status);
6018 
6019 	kfree(tb);
6020 	return 0;
6021 }
6022 
6023 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
6024 					      u32 *vdev_id)
6025 {
6026 	const void **tb;
6027 	const struct wmi_vdev_stopped_event *ev;
6028 	int ret;
6029 
6030 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6031 	if (IS_ERR(tb)) {
6032 		ret = PTR_ERR(tb);
6033 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6034 		return ret;
6035 	}
6036 
6037 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
6038 	if (!ev) {
6039 		ath12k_warn(ab, "failed to fetch vdev stop ev");
6040 		kfree(tb);
6041 		return -EPROTO;
6042 	}
6043 
6044 	*vdev_id = le32_to_cpu(ev->vdev_id);
6045 
6046 	kfree(tb);
6047 	return 0;
6048 }
6049 
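/* A mgmt rx event consists of a fixed WMI_TAG_MGMT_RX_HDR TLV followed
 * by the frame body in a WMI_TAG_ARRAY_BYTE TLV; only the first byte
 * array is taken as the frame buffer.
 */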
6050 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
6051 					u16 tag, u16 len,
6052 					const void *ptr, void *data)
6053 {
6054 	struct wmi_tlv_mgmt_rx_parse *parse = data;
6055 
6056 	switch (tag) {
6057 	case WMI_TAG_MGMT_RX_HDR:
6058 		parse->fixed = ptr;
6059 		break;
6060 	case WMI_TAG_ARRAY_BYTE:
6061 		if (!parse->frame_buf_done) {
6062 			parse->frame_buf = ptr;
6063 			parse->frame_buf_done = true;
6064 		}
6065 		break;
6066 	}
6067 	return 0;
6068 }
6069 
6070 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
6071 					  struct sk_buff *skb,
6072 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
6073 {
6074 	struct wmi_tlv_mgmt_rx_parse parse = { };
6075 	const struct ath12k_wmi_mgmt_rx_params *ev;
6076 	const u8 *frame;
6077 	int i, ret;
6078 
6079 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6080 				  ath12k_wmi_tlv_mgmt_rx_parse,
6081 				  &parse);
6082 	if (ret) {
6083 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
6084 		return ret;
6085 	}
6086 
6087 	ev = parse.fixed;
6088 	frame = parse.frame_buf;
6089 
6090 	if (!ev || !frame) {
6091 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
6092 		return -EPROTO;
6093 	}
6094 
6095 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
6096 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
6097 	hdr->channel = le32_to_cpu(ev->channel);
6098 	hdr->snr = le32_to_cpu(ev->snr);
6099 	hdr->rate = le32_to_cpu(ev->rate);
6100 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
6101 	hdr->buf_len = le32_to_cpu(ev->buf_len);
6102 	hdr->status = le32_to_cpu(ev->status);
6103 	hdr->flags = le32_to_cpu(ev->flags);
6104 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
6105 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
6106 
6107 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
6108 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
6109 
6110 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
6111 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
6112 		return -EPROTO;
6113 	}
6114 
6115 	/* shift the sk_buff so that skb->data points to `frame` and skb->len equals buf_len */
6116 	skb_trim(skb, 0);
6117 	skb_put(skb, frame - skb->data);
6118 	skb_pull(skb, frame - skb->data);
6119 	skb_put(skb, hdr->buf_len);
6120 
6121 	return 0;
6122 }
6123 
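/* Complete a management frame transmission: look up the msdu by its
 * descriptor id in the txmgmt idr, unmap it and report the tx status
 * (including the ack RSSI when valid) to mac80211. The pending mgmt tx
 * counter is decremented and any waiters are woken once it drops to
 * zero.
 */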
6124 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
6125 				    u32 status, u32 ack_rssi)
6126 {
6127 	struct sk_buff *msdu;
6128 	struct ieee80211_tx_info *info;
6129 	struct ath12k_skb_cb *skb_cb;
6130 	int num_mgmt;
6131 
6132 	spin_lock_bh(&ar->txmgmt_idr_lock);
6133 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
6134 
6135 	if (!msdu) {
6136 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
6137 			    desc_id);
6138 		spin_unlock_bh(&ar->txmgmt_idr_lock);
6139 		return -ENOENT;
6140 	}
6141 
6142 	idr_remove(&ar->txmgmt_idr, desc_id);
6143 	spin_unlock_bh(&ar->txmgmt_idr_lock);
6144 
6145 	skb_cb = ATH12K_SKB_CB(msdu);
6146 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
6147 
6148 	info = IEEE80211_SKB_CB(msdu);
6149 	memset(&info->status, 0, sizeof(info->status));
6150 
6151 	/* skip tx rate update from ieee80211_status */
6152 	info->status.rates[0].idx = -1;
6153 
6154 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
6155 		info->flags |= IEEE80211_TX_STAT_ACK;
6156 		info->status.ack_signal = ack_rssi;
6157 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
6158 	}
6159 
6160 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
6161 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
6162 
6163 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
6164 
6165 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
6166 
6167 	/* WARN when we received this event without doing any mgmt tx */
6168 	WARN_ON_ONCE(num_mgmt < 0);
6170 
6171 	if (!num_mgmt)
6172 		wake_up(&ar->txmgmt_empty_waitq);
6173 
6174 	return 0;
6175 }
6176 
6177 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
6178 					       struct sk_buff *skb,
6179 					       struct wmi_mgmt_tx_compl_event *param)
6180 {
6181 	const void **tb;
6182 	const struct wmi_mgmt_tx_compl_event *ev;
6183 	int ret;
6184 
6185 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6186 	if (IS_ERR(tb)) {
6187 		ret = PTR_ERR(tb);
6188 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6189 		return ret;
6190 	}
6191 
6192 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
6193 	if (!ev) {
6194 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
6195 		kfree(tb);
6196 		return -EPROTO;
6197 	}
6198 
6199 	param->pdev_id = ev->pdev_id;
6200 	param->desc_id = ev->desc_id;
6201 	param->status = ev->status;
6202 	param->ppdu_id = ev->ppdu_id;
6203 	param->ack_rssi = ev->ack_rssi;
6204 
6205 	kfree(tb);
6206 	return 0;
6207 }
6208 
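/* The helpers below drive the host-side scan state machine from WMI
 * scan events; all of them run under ar->data_lock. A transition seen
 * in an unexpected state is only logged, since it usually means the
 * firmware and host views of the scan have diverged.
 */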
6209 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
6210 {
6211 	lockdep_assert_held(&ar->data_lock);
6212 
6213 	switch (ar->scan.state) {
6214 	case ATH12K_SCAN_IDLE:
6215 	case ATH12K_SCAN_RUNNING:
6216 	case ATH12K_SCAN_ABORTING:
6217 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
6218 			    ath12k_scan_state_str(ar->scan.state),
6219 			    ar->scan.state);
6220 		break;
6221 	case ATH12K_SCAN_STARTING:
6222 		ar->scan.state = ATH12K_SCAN_RUNNING;
6223 
6224 		if (ar->scan.is_roc)
6225 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
6226 
6227 		complete(&ar->scan.started);
6228 		break;
6229 	}
6230 }
6231 
6232 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
6233 {
6234 	lockdep_assert_held(&ar->data_lock);
6235 
6236 	switch (ar->scan.state) {
6237 	case ATH12K_SCAN_IDLE:
6238 	case ATH12K_SCAN_RUNNING:
6239 	case ATH12K_SCAN_ABORTING:
6240 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
6241 			    ath12k_scan_state_str(ar->scan.state),
6242 			    ar->scan.state);
6243 		break;
6244 	case ATH12K_SCAN_STARTING:
6245 		complete(&ar->scan.started);
6246 		__ath12k_mac_scan_finish(ar);
6247 		break;
6248 	}
6249 }
6250 
6251 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
6252 {
6253 	lockdep_assert_held(&ar->data_lock);
6254 
6255 	switch (ar->scan.state) {
6256 	case ATH12K_SCAN_IDLE:
6257 	case ATH12K_SCAN_STARTING:
6258 		/* One suspected reason scan can be completed while starting is
6259 		 * if firmware fails to deliver all scan events to the host,
6260 		 * e.g. when transport pipe is full. This has been observed
6261 		 * with spectral scan phyerr events starving wmi transport
6262 		 * pipe. In such case the "scan completed" event should be (and
6263 		 * is) ignored by the host as it may be just firmware's scan
6264 		 * state machine recovering.
6265 		 */
6266 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
6267 			    ath12k_scan_state_str(ar->scan.state),
6268 			    ar->scan.state);
6269 		break;
6270 	case ATH12K_SCAN_RUNNING:
6271 	case ATH12K_SCAN_ABORTING:
6272 		__ath12k_mac_scan_finish(ar);
6273 		break;
6274 	}
6275 }
6276 
6277 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
6278 {
6279 	lockdep_assert_held(&ar->data_lock);
6280 
6281 	switch (ar->scan.state) {
6282 	case ATH12K_SCAN_IDLE:
6283 	case ATH12K_SCAN_STARTING:
6284 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
6285 			    ath12k_scan_state_str(ar->scan.state),
6286 			    ar->scan.state);
6287 		break;
6288 	case ATH12K_SCAN_RUNNING:
6289 	case ATH12K_SCAN_ABORTING:
6290 		ar->scan_channel = NULL;
6291 		break;
6292 	}
6293 }
6294 
6295 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
6296 {
6297 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6298 
6299 	lockdep_assert_held(&ar->data_lock);
6300 
6301 	switch (ar->scan.state) {
6302 	case ATH12K_SCAN_IDLE:
6303 	case ATH12K_SCAN_STARTING:
6304 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
6305 			    ath12k_scan_state_str(ar->scan.state),
6306 			    ar->scan.state);
6307 		break;
6308 	case ATH12K_SCAN_RUNNING:
6309 	case ATH12K_SCAN_ABORTING:
6310 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
6311 
6312 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
6313 			complete(&ar->scan.on_channel);
6314 
6315 		break;
6316 	}
6317 }
6318 
6319 static const char *
6320 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
6321 			       enum wmi_scan_completion_reason reason)
6322 {
6323 	switch (type) {
6324 	case WMI_SCAN_EVENT_STARTED:
6325 		return "started";
6326 	case WMI_SCAN_EVENT_COMPLETED:
6327 		switch (reason) {
6328 		case WMI_SCAN_REASON_COMPLETED:
6329 			return "completed";
6330 		case WMI_SCAN_REASON_CANCELLED:
6331 			return "completed [cancelled]";
6332 		case WMI_SCAN_REASON_PREEMPTED:
6333 			return "completed [preempted]";
6334 		case WMI_SCAN_REASON_TIMEDOUT:
6335 			return "completed [timedout]";
6336 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
6337 			return "completed [internal err]";
6338 		case WMI_SCAN_REASON_MAX:
6339 			break;
6340 		}
6341 		return "completed [unknown]";
6342 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6343 		return "bss channel";
6344 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6345 		return "foreign channel";
6346 	case WMI_SCAN_EVENT_DEQUEUED:
6347 		return "dequeued";
6348 	case WMI_SCAN_EVENT_PREEMPTED:
6349 		return "preempted";
6350 	case WMI_SCAN_EVENT_START_FAILED:
6351 		return "start failed";
6352 	case WMI_SCAN_EVENT_RESTARTED:
6353 		return "restarted";
6354 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6355 		return "foreign channel exit";
6356 	default:
6357 		return "unknown";
6358 	}
6359 }
6360 
6361 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
6362 			       struct wmi_scan_event *scan_evt_param)
6363 {
6364 	const void **tb;
6365 	const struct wmi_scan_event *ev;
6366 	int ret;
6367 
6368 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6369 	if (IS_ERR(tb)) {
6370 		ret = PTR_ERR(tb);
6371 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6372 		return ret;
6373 	}
6374 
6375 	ev = tb[WMI_TAG_SCAN_EVENT];
6376 	if (!ev) {
6377 		ath12k_warn(ab, "failed to fetch scan ev");
6378 		kfree(tb);
6379 		return -EPROTO;
6380 	}
6381 
6382 	scan_evt_param->event_type = ev->event_type;
6383 	scan_evt_param->reason = ev->reason;
6384 	scan_evt_param->channel_freq = ev->channel_freq;
6385 	scan_evt_param->scan_req_id = ev->scan_req_id;
6386 	scan_evt_param->scan_id = ev->scan_id;
6387 	scan_evt_param->vdev_id = ev->vdev_id;
6388 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
6389 
6390 	kfree(tb);
6391 	return 0;
6392 }
6393 
6394 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
6395 					   struct wmi_peer_sta_kickout_arg *arg)
6396 {
6397 	const void **tb;
6398 	const struct wmi_peer_sta_kickout_event *ev;
6399 	int ret;
6400 
6401 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6402 	if (IS_ERR(tb)) {
6403 		ret = PTR_ERR(tb);
6404 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6405 		return ret;
6406 	}
6407 
6408 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
6409 	if (!ev) {
6410 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
6411 		kfree(tb);
6412 		return -EPROTO;
6413 	}
6414 
6415 	arg->mac_addr = ev->peer_macaddr.addr;
6416 
6417 	kfree(tb);
6418 	return 0;
6419 }
6420 
6421 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
6422 			       struct wmi_roam_event *roam_ev)
6423 {
6424 	const void **tb;
6425 	const struct wmi_roam_event *ev;
6426 	int ret;
6427 
6428 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6429 	if (IS_ERR(tb)) {
6430 		ret = PTR_ERR(tb);
6431 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6432 		return ret;
6433 	}
6434 
6435 	ev = tb[WMI_TAG_ROAM_EVENT];
6436 	if (!ev) {
6437 		ath12k_warn(ab, "failed to fetch roam ev");
6438 		kfree(tb);
6439 		return -EPROTO;
6440 	}
6441 
6442 	roam_ev->vdev_id = ev->vdev_id;
6443 	roam_ev->reason = ev->reason;
6444 	roam_ev->rssi = ev->rssi;
6445 
6446 	kfree(tb);
6447 	return 0;
6448 }
6449 
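/* Map a channel center frequency to a flat index across the enabled
 * channels of every supported band, skipping channels that fall
 * outside the radio's current frequency range. If @freq is not found
 * the running index is returned as-is, so callers are expected to
 * bounds-check the result.
 */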
6450 static int freq_to_idx(struct ath12k *ar, int freq)
6451 {
6452 	struct ieee80211_supported_band *sband;
6453 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6454 	int band, ch, idx = 0;
6455 
6456 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6457 		if (!ar->mac.sbands[band].channels)
6458 			continue;
6459 
6460 		sband = hw->wiphy->bands[band];
6461 		if (!sband)
6462 			continue;
6463 
6464 		for (ch = 0; ch < sband->n_channels; ch++, idx++) {
6465 			if (sband->channels[ch].center_freq <
6466 			    KHZ_TO_MHZ(ar->freq_range.start_freq) ||
6467 			    sband->channels[ch].center_freq >
6468 			    KHZ_TO_MHZ(ar->freq_range.end_freq))
6469 				continue;
6470 
6471 			if (sband->channels[ch].center_freq == freq)
6472 				goto exit;
6473 		}
6474 	}
6475 
6476 exit:
6477 	return idx;
6478 }
6479 
6480 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6481 				    struct wmi_chan_info_event *ch_info_ev)
6482 {
6483 	const void **tb;
6484 	const struct wmi_chan_info_event *ev;
6485 	int ret;
6486 
6487 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6488 	if (IS_ERR(tb)) {
6489 		ret = PTR_ERR(tb);
6490 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6491 		return ret;
6492 	}
6493 
6494 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
6495 	if (!ev) {
6496 		ath12k_warn(ab, "failed to fetch chan info ev");
6497 		kfree(tb);
6498 		return -EPROTO;
6499 	}
6500 
6501 	ch_info_ev->err_code = ev->err_code;
6502 	ch_info_ev->freq = ev->freq;
6503 	ch_info_ev->cmd_flags = ev->cmd_flags;
6504 	ch_info_ev->noise_floor = ev->noise_floor;
6505 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
6506 	ch_info_ev->cycle_count = ev->cycle_count;
6507 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
6508 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
6509 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
6510 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
6511 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
6512 	ch_info_ev->vdev_id = ev->vdev_id;
6513 
6514 	kfree(tb);
6515 	return 0;
6516 }
6517 
6518 static int
6519 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6520 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
6521 {
6522 	const void **tb;
6523 	const struct wmi_pdev_bss_chan_info_event *ev;
6524 	int ret;
6525 
6526 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6527 	if (IS_ERR(tb)) {
6528 		ret = PTR_ERR(tb);
6529 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6530 		return ret;
6531 	}
6532 
6533 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
6534 	if (!ev) {
6535 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
6536 		kfree(tb);
6537 		return -EPROTO;
6538 	}
6539 
6540 	bss_ch_info_ev->pdev_id = ev->pdev_id;
6541 	bss_ch_info_ev->freq = ev->freq;
6542 	bss_ch_info_ev->noise_floor = ev->noise_floor;
6543 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
6544 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
6545 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
6546 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
6547 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
6548 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
6549 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
6550 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
6551 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
6552 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
6553 
6554 	kfree(tb);
6555 	return 0;
6556 }
6557 
6558 static int
6559 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
6560 				      struct wmi_vdev_install_key_complete_arg *arg)
6561 {
6562 	const void **tb;
6563 	const struct wmi_vdev_install_key_compl_event *ev;
6564 	int ret;
6565 
6566 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6567 	if (IS_ERR(tb)) {
6568 		ret = PTR_ERR(tb);
6569 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6570 		return ret;
6571 	}
6572 
6573 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
6574 	if (!ev) {
6575 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
6576 		kfree(tb);
6577 		return -EPROTO;
6578 	}
6579 
6580 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6581 	arg->macaddr = ev->peer_macaddr.addr;
6582 	arg->key_idx = le32_to_cpu(ev->key_idx);
6583 	arg->key_flags = le32_to_cpu(ev->key_flags);
6584 	arg->status = le32_to_cpu(ev->status);
6585 
6586 	kfree(tb);
6587 	return 0;
6588 }
6589 
6590 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6591 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6592 {
6593 	const void **tb;
6594 	const struct wmi_peer_assoc_conf_event *ev;
6595 	int ret;
6596 
6597 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6598 	if (IS_ERR(tb)) {
6599 		ret = PTR_ERR(tb);
6600 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6601 		return ret;
6602 	}
6603 
6604 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6605 	if (!ev) {
6606 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6607 		kfree(tb);
6608 		return -EPROTO;
6609 	}
6610 
6611 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6612 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6613 
6614 	kfree(tb);
6615 	return 0;
6616 }
6617 
6618 static int
6619 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6620 			 const struct wmi_pdev_temperature_event *ev)
6621 {
6622 	const void **tb;
6623 	int ret;
6624 
6625 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6626 	if (IS_ERR(tb)) {
6627 		ret = PTR_ERR(tb);
6628 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6629 		return ret;
6630 	}
6631 
6632 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6633 	if (!ev) {
6634 		ath12k_warn(ab, "failed to fetch pdev temp ev");
6635 		kfree(tb);
6636 		return -EPROTO;
6637 	}
6638 
6639 	kfree(tb);
6640 	return 0;
6641 }
6642 
6643 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6644 {
6645 	/* try to send pending beacons first. they take priority */
6646 	wake_up(&ab->wmi_ab.tx_credits_wq);
6647 }
6648 
6649 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6650 {
6651 	const struct wmi_11d_new_cc_event *ev;
6652 	struct ath12k *ar;
6653 	struct ath12k_pdev *pdev;
6654 	const void **tb;
6655 	int ret, i;
6656 
6657 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6658 	if (IS_ERR(tb)) {
6659 		ret = PTR_ERR(tb);
6660 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6661 		return ret;
6662 	}
6663 
6664 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6665 	if (!ev) {
6666 		kfree(tb);
6667 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6668 		return -EPROTO;
6669 	}
6670 
6671 	spin_lock_bh(&ab->base_lock);
6672 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6673 	spin_unlock_bh(&ab->base_lock);
6674 
6675 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6676 		   ab->new_alpha2[0],
6677 		   ab->new_alpha2[1]);
6678 
6679 	kfree(tb);
6680 
6681 	for (i = 0; i < ab->num_radios; i++) {
6682 		pdev = &ab->pdevs[i];
6683 		ar = pdev->ar;
6684 		ar->state_11d = ATH12K_11D_IDLE;
6685 		ar->ah->regd_updated = false;
6686 		complete(&ar->completed_11d_scan);
6687 	}
6688 
6689 	queue_work(ab->workqueue, &ab->update_11d_work);
6690 
6691 	return 0;
6692 }
6693 
6694 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6695 				       struct sk_buff *skb)
6696 {
6697 	dev_kfree_skb(skb);
6698 }
6699 
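/* Handle a regulatory channel list event: extract and validate the new
 * regulatory info, cache it per pdev and build a new regdomain from it,
 * falling back to the previous country setting if the host fails while
 * firmware has already switched.
 */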
6700 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6701 {
6702 	struct ath12k_reg_info *reg_info;
6703 	struct ath12k *ar = NULL;
6704 	u8 pdev_idx = 255;
6705 	int ret;
6706 
6707 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6708 	if (!reg_info) {
6709 		ret = -ENOMEM;
6710 		goto fallback;
6711 	}
6712 
6713 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6714 	if (ret) {
6715 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6716 		goto mem_free;
6717 	}
6718 
6719 	ret = ath12k_reg_validate_reg_info(ab, reg_info);
6720 	if (ret == ATH12K_REG_STATUS_FALLBACK) {
6721 		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
6722 		/* firmware has successfully switched to the new regd but host
6723 		 * cannot continue, so free reg_info and fall back to the old regd
6724 		 */
6725 		goto mem_free;
6726 	} else if (ret == ATH12K_REG_STATUS_DROP) {
6727 		/* reg info is valid but we will neither store it
6728 		 * nor create a new regd from it
6729 		 */
6730 		ret = ATH12K_REG_STATUS_VALID;
6731 		goto mem_free;
6732 	}
6733 
6734 	/* free old reg_info if it exists */
6735 	pdev_idx = reg_info->phy_id;
6736 	if (ab->reg_info[pdev_idx]) {
6737 		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
6738 		kfree(ab->reg_info[pdev_idx]);
6739 	}
6740 	/* reg_info is valid; store it for later use even if
6741 	 * the regd build below fails
6742 	 */
6743 	ab->reg_info[pdev_idx] = reg_info;
6744 
6745 	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
6746 					  IEEE80211_REG_UNSET_AP);
6747 	if (ret) {
6748 		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
6749 		goto fallback;
6750 	}
6751 
6752 	goto out;
6753 
6754 mem_free:
6755 	ath12k_reg_reset_reg_info(reg_info);
6756 	kfree(reg_info);
6757 
6758 	if (ret == ATH12K_REG_STATUS_VALID)
6759 		goto out;
6760 
6761 fallback:
6762 	/* Fall back to the older regd (by sending the previous country
6763 	 * setting again) if fw has succeeded and we failed to process here.
6764 	 * The Regdomain should be uniform across driver and fw. Since the
6765 	 * FW has processed the command and sent a success status, we expect
6766 	 * this function to succeed as well. If it doesn't, CTRY needs to be
6767 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6768 	 */
6769 	/* TODO: This is rare, but still should also be handled */
6770 	WARN_ON(1);
6771 
6772 out:
6773 	/* In some error cases, even a valid pdev_idx might not be available */
6774 	if (pdev_idx != 255)
6775 		ar = ab->pdevs[pdev_idx].ar;
6776 
6777 	/* During the boot-time update, 'ar' might not be allocated,
6778 	 * so the completion cannot be marked at that point.
6779 	 * This boot-time update is handled in ath12k_mac_hw_register()
6780 	 * before registering the hardware.
6781 	 */
6782 	if (ar)
6783 		complete_all(&ar->regd_update_completed);
6784 
6785 	return ret;
6786 }
6787 
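/* TLV iterator callback for the WMI ready event: record the init status
 * and base MAC address from the fixed params and, when firmware supplies
 * one extra MAC address per radio, assign them to the individual pdevs.
 */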
6788 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6789 				const void *ptr, void *data)
6790 {
6791 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6792 	struct wmi_ready_event fixed_param;
6793 	struct ath12k_wmi_mac_addr_params *addr_list;
6794 	struct ath12k_pdev *pdev;
6795 	u32 num_mac_addr;
6796 	int i;
6797 
6798 	switch (tag) {
6799 	case WMI_TAG_READY_EVENT:
6800 		memset(&fixed_param, 0, sizeof(fixed_param));
6801 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6802 		       min_t(u16, sizeof(fixed_param), len));
6803 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6804 		rdy_parse->num_extra_mac_addr =
6805 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6806 
6807 		ether_addr_copy(ab->mac_addr,
6808 				fixed_param.ready_event_min.mac_addr.addr);
6809 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6810 		ab->wmi_ready = true;
6811 		break;
6812 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6813 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6814 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6815 
6816 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6817 			break;
6818 
6819 		for (i = 0; i < ab->num_radios; i++) {
6820 			pdev = &ab->pdevs[i];
6821 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6822 		}
6823 		ab->pdevs_macaddr_valid = true;
6824 		break;
6825 	default:
6826 		break;
6827 	}
6828 
6829 	return 0;
6830 }
6831 
6832 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6833 {
6834 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6835 	int ret;
6836 
6837 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6838 				  ath12k_wmi_rdy_parse, &rdy_parse);
6839 	if (ret) {
6840 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6841 		return ret;
6842 	}
6843 
6844 	complete(&ab->wmi_ab.unified_ready);
6845 	return 0;
6846 }
6847 
6848 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6849 {
6850 	struct wmi_peer_delete_resp_event peer_del_resp;
6851 	struct ath12k *ar;
6852 
6853 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6854 		ath12k_warn(ab, "failed to extract peer delete resp");
6855 		return;
6856 	}
6857 
6858 	rcu_read_lock();
6859 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6860 	if (!ar) {
6861 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6862 			    peer_del_resp.vdev_id);
6863 		rcu_read_unlock();
6864 		return;
6865 	}
6866 
6867 	complete(&ar->peer_delete_done);
6868 	rcu_read_unlock();
6869 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6870 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6871 }
6872 
6873 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6874 					  struct sk_buff *skb)
6875 {
6876 	struct ath12k *ar;
6877 	u32 vdev_id = 0;
6878 
6879 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6880 		ath12k_warn(ab, "failed to extract vdev delete resp");
6881 		return;
6882 	}
6883 
6884 	rcu_read_lock();
6885 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6886 	if (!ar) {
6887 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6888 			    vdev_id);
6889 		rcu_read_unlock();
6890 		return;
6891 	}
6892 
6893 	complete(&ar->vdev_delete_done);
6894 
6895 	rcu_read_unlock();
6896 
6897 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6898 		   vdev_id);
6899 }
6900 
6901 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6902 {
6903 	switch (vdev_resp_status) {
6904 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6905 		return "invalid vdev id";
6906 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6907 		return "not supported";
6908 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6909 		return "dfs violation";
6910 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6911 		return "invalid regdomain";
6912 	default:
6913 		return "unknown";
6914 	}
6915 }
6916 
6917 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6918 {
6919 	struct wmi_vdev_start_resp_event vdev_start_resp;
6920 	struct ath12k *ar;
6921 	u32 status;
6922 
6923 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6924 		ath12k_warn(ab, "failed to extract vdev start resp");
6925 		return;
6926 	}
6927 
6928 	rcu_read_lock();
6929 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6930 	if (!ar) {
6931 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6932 			    vdev_start_resp.vdev_id);
6933 		rcu_read_unlock();
6934 		return;
6935 	}
6936 
6937 	ar->last_wmi_vdev_start_status = 0;
6938 
6939 	status = le32_to_cpu(vdev_start_resp.status);
6940 	if (WARN_ON_ONCE(status)) {
6941 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6942 			    status, ath12k_wmi_vdev_resp_print(status));
6943 		ar->last_wmi_vdev_start_status = status;
6944 	}
6945 
6946 	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
6947 
6948 	complete(&ar->vdev_setup_done);
6949 
6950 	rcu_read_unlock();
6951 
6952 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6953 		   vdev_start_resp.vdev_id);
6954 }
6955 
6956 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6957 {
6958 	u32 vdev_id, tx_status;
6959 
6960 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6961 		ath12k_warn(ab, "failed to extract bcn tx status");
6962 		return;
6963 	}
6964 }
6965 
6966 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6967 {
6968 	struct ath12k *ar;
6969 	u32 vdev_id = 0;
6970 
6971 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6972 		ath12k_warn(ab, "failed to extract vdev stopped event");
6973 		return;
6974 	}
6975 
6976 	rcu_read_lock();
6977 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6978 	if (!ar) {
6979 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6980 			    vdev_id);
6981 		rcu_read_unlock();
6982 		return;
6983 	}
6984 
6985 	complete(&ar->vdev_setup_done);
6986 
6987 	rcu_read_unlock();
6988 
6989 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6990 }
6991 
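/* Deliver a WMI-reported management frame to mac80211: rebuild the rx
 * status (band, frequency, signal, rate) from the event parameters and
 * undo the firmware decryption artifacts on protected frames.
 */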
6992 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6993 {
6994 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6995 	struct ath12k *ar;
6996 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6997 	struct ieee80211_hdr *hdr;
6998 	u16 fc;
6999 	struct ieee80211_supported_band *sband;
7000 	s32 noise_floor;
7001 
7002 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
7003 		ath12k_warn(ab, "failed to extract mgmt rx event");
7004 		dev_kfree_skb(skb);
7005 		return;
7006 	}
7007 
7008 	memset(status, 0, sizeof(*status));
7009 
7010 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
7011 		   rx_ev.status);
7012 
7013 	rcu_read_lock();
7014 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
7015 
7016 	if (!ar) {
7017 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
7018 			    rx_ev.pdev_id);
7019 		dev_kfree_skb(skb);
7020 		goto exit;
7021 	}
7022 
7023 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
7024 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
7025 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
7026 			     WMI_RX_STATUS_ERR_CRC))) {
7027 		dev_kfree_skb(skb);
7028 		goto exit;
7029 	}
7030 
7031 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
7032 		status->flag |= RX_FLAG_MMIC_ERROR;
7033 
7034 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
7035 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
7036 		status->band = NL80211_BAND_6GHZ;
7037 		status->freq = rx_ev.chan_freq;
7038 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
7039 		status->band = NL80211_BAND_2GHZ;
7040 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
7041 		status->band = NL80211_BAND_5GHZ;
7042 	} else {
7043 		/* Shouldn't happen unless the list of channels advertised to
7044 		 * mac80211 has been changed.
7045 		 */
7046 		WARN_ON_ONCE(1);
7047 		dev_kfree_skb(skb);
7048 		goto exit;
7049 	}
7050 
7051 	if (rx_ev.phy_mode == MODE_11B &&
7052 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
7053 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7054 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
7055 
7056 	sband = &ar->mac.sbands[status->band];
7057 
7058 	if (status->band != NL80211_BAND_6GHZ)
7059 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
7060 							      status->band);
7061 
7062 	spin_lock_bh(&ar->data_lock);
7063 	noise_floor = ath12k_pdev_get_noise_floor(ar);
7064 	spin_unlock_bh(&ar->data_lock);
7065 
7066 	status->signal = rx_ev.snr + noise_floor;
7067 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
7068 
7069 	hdr = (struct ieee80211_hdr *)skb->data;
7070 	fc = le16_to_cpu(hdr->frame_control);
7071 
7072 	/* Firmware is guaranteed to report all essential management frames via
7073 	 * WMI while it can deliver some extra via HTT. Since there can be
7074 	 * duplicates, split the reporting wrt monitor/sniffing.
7075 	 */
7076 	status->flag |= RX_FLAG_SKIP_MONITOR;
7077 
7078 	/* In case of PMF, FW delivers decrypted frames with the Protected bit
7079 	 * set, including group privacy action frames.
7080 	 */
7081 	if (ieee80211_has_protected(hdr->frame_control)) {
7082 		status->flag |= RX_FLAG_DECRYPTED;
7083 
7084 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
7085 			status->flag |= RX_FLAG_IV_STRIPPED |
7086 					RX_FLAG_MMIC_STRIPPED;
7087 			hdr->frame_control = __cpu_to_le16(fc &
7088 					     ~IEEE80211_FCTL_PROTECTED);
7089 		}
7090 	}
7091 
7092 	if (ieee80211_is_beacon(hdr->frame_control))
7093 		ath12k_mac_handle_beacon(ar, skb);
7094 
7095 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7096 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
7097 		   skb, skb->len,
7098 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
7099 
7100 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7101 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
7102 		   status->freq, status->band, status->signal,
7103 		   status->rate_idx);
7104 
7105 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
7106 
7107 exit:
7108 	rcu_read_unlock();
7109 }
7110 
7111 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
7112 {
7113 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
7114 	struct ath12k *ar;
7115 
7116 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
7117 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
7118 		return;
7119 	}
7120 
7121 	rcu_read_lock();
7122 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
7123 	if (!ar) {
7124 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
7125 			    tx_compl_param.pdev_id);
7126 		goto exit;
7127 	}
7128 
7129 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
7130 				 le32_to_cpu(tx_compl_param.status),
7131 				 le32_to_cpu(tx_compl_param.ack_rssi));
7132 
7133 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7134 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
7135 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
7136 		   tx_compl_param.status);
7137 
7138 exit:
7139 	rcu_read_unlock();
7140 }
7141 
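/* Look up the ar whose scan on the given vdev is in the given state.
 * Needed for scan events on vdevs that are no longer in the active
 * interface list, e.g. a scan cancelled during interface teardown.
 */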
7142 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
7143 						  u32 vdev_id,
7144 						  enum ath12k_scan_state state)
7145 {
7146 	int i;
7147 	struct ath12k_pdev *pdev;
7148 	struct ath12k *ar;
7149 
7150 	for (i = 0; i < ab->num_radios; i++) {
7151 		pdev = rcu_dereference(ab->pdevs_active[i]);
7152 		if (pdev && pdev->ar) {
7153 			ar = pdev->ar;
7154 
7155 			spin_lock_bh(&ar->data_lock);
7156 			if (ar->scan.state == state &&
7157 			    ar->scan.arvif &&
7158 			    ar->scan.arvif->vdev_id == vdev_id) {
7159 				spin_unlock_bh(&ar->data_lock);
7160 				return ar;
7161 			}
7162 			spin_unlock_bh(&ar->data_lock);
7163 		}
7164 	}
7165 	return NULL;
7166 }
7167 
7168 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
7169 {
7170 	struct ath12k *ar;
7171 	struct wmi_scan_event scan_ev = {0};
7172 
7173 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
7174 		ath12k_warn(ab, "failed to extract scan event");
7175 		return;
7176 	}
7177 
7178 	rcu_read_lock();
7179 
7180 	/* In case the scan was cancelled, e.g. during interface teardown,
7181 	 * the interface will not be found in active interfaces.
7182 	 * Rather, in such scenarios, iterate over the active pdevs to
7183 	 * search for the 'ar' whose scan is ABORTING and whose aborting
7184 	 * scan's vdev id matches this event info.
7185 	 */
7186 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
7187 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
7188 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7189 						 ATH12K_SCAN_ABORTING);
7190 		if (!ar)
7191 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7192 							 ATH12K_SCAN_RUNNING);
7193 	} else {
7194 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
7195 	}
7196 
7197 	if (!ar) {
7198 		ath12k_warn(ab, "Received scan event for unknown vdev");
7199 		rcu_read_unlock();
7200 		return;
7201 	}
7202 
7203 	spin_lock_bh(&ar->data_lock);
7204 
7205 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7206 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7207 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
7208 						  le32_to_cpu(scan_ev.reason)),
7209 		   le32_to_cpu(scan_ev.event_type),
7210 		   le32_to_cpu(scan_ev.reason),
7211 		   le32_to_cpu(scan_ev.channel_freq),
7212 		   le32_to_cpu(scan_ev.scan_req_id),
7213 		   le32_to_cpu(scan_ev.scan_id),
7214 		   le32_to_cpu(scan_ev.vdev_id),
7215 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
7216 
7217 	switch (le32_to_cpu(scan_ev.event_type)) {
7218 	case WMI_SCAN_EVENT_STARTED:
7219 		ath12k_wmi_event_scan_started(ar);
7220 		break;
7221 	case WMI_SCAN_EVENT_COMPLETED:
7222 		ath12k_wmi_event_scan_completed(ar);
7223 		break;
7224 	case WMI_SCAN_EVENT_BSS_CHANNEL:
7225 		ath12k_wmi_event_scan_bss_chan(ar);
7226 		break;
7227 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
7228 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
7229 		break;
7230 	case WMI_SCAN_EVENT_START_FAILED:
7231 		ath12k_warn(ab, "received scan start failure event\n");
7232 		ath12k_wmi_event_scan_start_failed(ar);
7233 		break;
7234 	case WMI_SCAN_EVENT_DEQUEUED:
7235 		__ath12k_mac_scan_finish(ar);
7236 		break;
7237 	case WMI_SCAN_EVENT_PREEMPTED:
7238 	case WMI_SCAN_EVENT_RESTARTED:
7239 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7240 	default:
7241 		break;
7242 	}
7243 
7244 	spin_unlock_bh(&ar->data_lock);
7245 
7246 	rcu_read_unlock();
7247 }
7248 
7249 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
7250 {
7251 	struct wmi_peer_sta_kickout_arg arg = {};
7252 	struct ieee80211_sta *sta;
7253 	struct ath12k_peer *peer;
7254 	struct ath12k *ar;
7255 
7256 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7257 		ath12k_warn(ab, "failed to extract peer sta kickout event");
7258 		return;
7259 	}
7260 
7261 	rcu_read_lock();
7262 
7263 	spin_lock_bh(&ab->base_lock);
7264 
7265 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
7266 
7267 	if (!peer) {
7268 		ath12k_warn(ab, "peer not found %pM\n",
7269 			    arg.mac_addr);
7270 		goto exit;
7271 	}
7272 
7273 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
7274 	if (!ar) {
7275 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
7276 			    peer->vdev_id);
7277 		goto exit;
7278 	}
7279 
7280 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7281 					   arg.mac_addr, NULL);
7282 	if (!sta) {
7283 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
7284 			    arg.mac_addr);
7285 		goto exit;
7286 	}
7287 
7288 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
7289 		   arg.mac_addr);
7290 
7291 	ieee80211_report_low_ack(sta, 10);
7292 
7293 exit:
7294 	spin_unlock_bh(&ab->base_lock);
7295 	rcu_read_unlock();
7296 }
7297 
7298 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
7299 {
7300 	struct wmi_roam_event roam_ev = {};
7301 	struct ath12k *ar;
7302 	u32 vdev_id;
7303 	u8 roam_reason;
7304 
7305 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
7306 		ath12k_warn(ab, "failed to extract roam event");
7307 		return;
7308 	}
7309 
7310 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
7311 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
7312 				   WMI_ROAM_REASON_MASK);
7313 
7314 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7315 		   "wmi roam event vdev %u reason %d rssi %d\n",
7316 		   vdev_id, roam_reason, roam_ev.rssi);
7317 
7318 	rcu_read_lock();
7319 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7320 	if (!ar) {
7321 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
7322 		rcu_read_unlock();
7323 		return;
7324 	}
7325 
7326 	if (roam_reason >= WMI_ROAM_REASON_MAX)
7327 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
7328 			    roam_reason, vdev_id);
7329 
7330 	switch (roam_reason) {
7331 	case WMI_ROAM_REASON_BEACON_MISS:
7332 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
7333 		break;
7334 	case WMI_ROAM_REASON_BETTER_AP:
7335 	case WMI_ROAM_REASON_LOW_RSSI:
7336 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
7337 	case WMI_ROAM_REASON_HO_FAILED:
7338 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
7339 			    roam_reason, vdev_id);
7340 		break;
7341 	}
7342 
7343 	rcu_read_unlock();
7344 }
7345 
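/* Update the survey record of a scanned channel from a WMI chan info
 * event; raw cycle counters are converted to time by dividing by the
 * channel counter frequency, which mac_clk_mhz may override below.
 */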
7346 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7347 {
7348 	struct wmi_chan_info_event ch_info_ev = {0};
7349 	struct ath12k *ar;
7350 	struct survey_info *survey;
7351 	int idx;
7352 	/* HW channel counters frequency value in hertz */
7353 	u32 cc_freq_hz = ab->cc_freq_hz;
7354 
7355 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
7356 		ath12k_warn(ab, "failed to extract chan info event");
7357 		return;
7358 	}
7359 
7360 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7361 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
7362 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
7363 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
7364 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
7365 		   ch_info_ev.mac_clk_mhz);
7366 
7367 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
7368 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
7369 		return;
7370 	}
7371 
7372 	rcu_read_lock();
7373 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
7374 	if (!ar) {
7375 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
7376 			    ch_info_ev.vdev_id);
7377 		rcu_read_unlock();
7378 		return;
7379 	}
7380 	spin_lock_bh(&ar->data_lock);
7381 
7382 	switch (ar->scan.state) {
7383 	case ATH12K_SCAN_IDLE:
7384 	case ATH12K_SCAN_STARTING:
7385 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
7386 		goto exit;
7387 	case ATH12K_SCAN_RUNNING:
7388 	case ATH12K_SCAN_ABORTING:
7389 		break;
7390 	}
7391 
7392 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
7393 	if (idx >= ARRAY_SIZE(ar->survey)) {
7394 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7395 			    ch_info_ev.freq, idx);
7396 		goto exit;
7397 	}
7398 
7399 	/* If FW provides the MAC clock frequency in MHz, override the
7400 	 * initialized HW channel counter frequency value
7401 	 */
7402 	if (ch_info_ev.mac_clk_mhz)
7403 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
7404 
7405 	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7406 		survey = &ar->survey[idx];
7407 		memset(survey, 0, sizeof(*survey));
7408 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
7409 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7410 				 SURVEY_INFO_TIME_BUSY;
7411 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
7412 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
7413 					    cc_freq_hz);
7414 	}
7415 exit:
7416 	spin_unlock_bh(&ar->data_lock);
7417 	rcu_read_unlock();
7418 }
7419 
7420 static void
7421 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7422 {
7423 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7424 	struct survey_info *survey;
7425 	struct ath12k *ar;
7426 	u32 cc_freq_hz = ab->cc_freq_hz;
7427 	u64 busy, total, tx, rx, rx_bss;
7428 	int idx;
7429 
7430 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7431 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
7432 		return;
7433 	}
7434 
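	/* Firmware reports each 64-bit cycle counter as two 32-bit halves;
	 * reassemble them as (u64)high << 32 | low before use.
	 */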
7435 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
7436 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
7437 
7438 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
7439 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
7440 
7441 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
7442 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
7443 
7444 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
7445 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
7446 
7447 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
7448 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
7449 
7450 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7451 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7452 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7453 		   bss_ch_info_ev.noise_floor, busy, total,
7454 		   tx, rx, rx_bss);
7455 
7456 	rcu_read_lock();
7457 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
7458 
7459 	if (!ar) {
7460 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7461 			    bss_ch_info_ev.pdev_id);
7462 		rcu_read_unlock();
7463 		return;
7464 	}
7465 
7466 	spin_lock_bh(&ar->data_lock);
7467 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
7468 	if (idx >= ARRAY_SIZE(ar->survey)) {
7469 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7470 			    bss_ch_info_ev.freq, idx);
7471 		goto exit;
7472 	}
7473 
7474 	survey = &ar->survey[idx];
7475 
7476 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
7477 	survey->time      = div_u64(total, cc_freq_hz);
7478 	survey->time_busy = div_u64(busy, cc_freq_hz);
7479 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
7480 	survey->time_tx   = div_u64(tx, cc_freq_hz);
7481 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
7482 			     SURVEY_INFO_TIME |
7483 			     SURVEY_INFO_TIME_BUSY |
7484 			     SURVEY_INFO_TIME_RX |
7485 			     SURVEY_INFO_TIME_TX);
7486 exit:
7487 	spin_unlock_bh(&ar->data_lock);
7488 	complete(&ar->bss_survey_done);
7489 
7490 	rcu_read_unlock();
7491 }
7492 
7493 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
7494 						struct sk_buff *skb)
7495 {
7496 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
7497 	struct ath12k *ar;
7498 
7499 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
7500 		ath12k_warn(ab, "failed to extract install key compl event");
7501 		return;
7502 	}
7503 
7504 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7505 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
7506 		   install_key_compl.key_idx, install_key_compl.key_flags,
7507 		   install_key_compl.macaddr, install_key_compl.status);
7508 
7509 	rcu_read_lock();
7510 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
7511 	if (!ar) {
7512 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
7513 			    install_key_compl.vdev_id);
7514 		rcu_read_unlock();
7515 		return;
7516 	}
7517 
7518 	ar->install_key_status = 0;
7519 
7520 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
7521 		ath12k_warn(ab, "install key failed for %pM status %d\n",
7522 			    install_key_compl.macaddr, install_key_compl.status);
7523 		ar->install_key_status = install_key_compl.status;
7524 	}
7525 
7526 	complete(&ar->install_key_done);
7527 	rcu_read_unlock();
7528 }
7529 
7530 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
7531 					  u16 tag, u16 len,
7532 					  const void *ptr,
7533 					  void *data)
7534 {
7535 	const struct wmi_service_available_event *ev;
7536 	u32 *wmi_ext2_service_bitmap;
7537 	int i, j;
7538 	u16 expected_len;
7539 
7540 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
7541 	if (len < expected_len) {
7542 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
7543 			    len, tag);
7544 		return -EINVAL;
7545 	}
7546 
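	/* Each u32 word of a segment bitmap carries
	 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service bits: word i covers the
	 * services starting at base + 32 * i, where base is WMI_MAX_SERVICE
	 * for the first segment and WMI_MAX_EXT_SERVICE for the ext2 one.
	 */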
7547 	switch (tag) {
7548 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
7549 		ev = (struct wmi_service_available_event *)ptr;
7550 		for (i = 0, j = WMI_MAX_SERVICE;
7551 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
7552 		     i++) {
7553 			do {
7554 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
7555 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7556 					set_bit(j, ab->wmi_ab.svc_map);
7557 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7558 		}
7559 
7560 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7561 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
7562 			   ev->wmi_service_segment_bitmap[0],
7563 			   ev->wmi_service_segment_bitmap[1],
7564 			   ev->wmi_service_segment_bitmap[2],
7565 			   ev->wmi_service_segment_bitmap[3]);
7566 		break;
7567 	case WMI_TAG_ARRAY_UINT32:
7568 		wmi_ext2_service_bitmap = (u32 *)ptr;
7569 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
7570 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
7571 		     i++) {
7572 			do {
7573 				if (wmi_ext2_service_bitmap[i] &
7574 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7575 					set_bit(j, ab->wmi_ab.svc_map);
7576 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7577 		}
7578 
7579 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7580 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
7581 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7582 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7583 		break;
7584 	}
7585 	return 0;
7586 }
7587 
7588 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7589 {
7590 	int ret;
7591 
7592 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7593 				  ath12k_wmi_tlv_services_parser,
7594 				  NULL);
7595 	return ret;
7596 }
7597 
7598 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7599 {
7600 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7601 	struct ath12k *ar;
7602 
7603 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7604 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7605 		return;
7606 	}
7607 
7608 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7609 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
7610 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7611 
7612 	rcu_read_lock();
7613 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7614 
7615 	if (!ar) {
7616 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7617 			    peer_assoc_conf.vdev_id);
7618 		rcu_read_unlock();
7619 		return;
7620 	}
7621 
7622 	complete(&ar->peer_assoc_done);
7623 	rcu_read_unlock();
7624 }
7625 
7626 static void
7627 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
7628 			      struct ath12k_fw_stats *fw_stats,
7629 			      char *buf, u32 *length)
7630 {
7631 	const struct ath12k_fw_stats_vdev *vdev;
7632 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7633 	struct ath12k_link_vif *arvif;
7634 	u32 len = *length;
7635 	u8 *vif_macaddr;
7636 	int i;
7637 
7638 	len += scnprintf(buf + len, buf_len - len, "\n");
7639 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7640 			 "ath12k VDEV stats");
7641 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7642 			 "=================");
7643 
7644 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
7645 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
7646 		if (!arvif)
7647 			continue;
7648 		vif_macaddr = arvif->ahvif->vif->addr;
7649 
7650 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7651 				 "VDEV ID", vdev->vdev_id);
7652 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7653 				 "VDEV MAC address", vif_macaddr);
7654 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7655 				 "beacon snr", vdev->beacon_snr);
7656 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7657 				 "data snr", vdev->data_snr);
7658 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7659 				 "num rx frames", vdev->num_rx_frames);
7660 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7661 				 "num rts fail", vdev->num_rts_fail);
7662 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7663 				 "num rts success", vdev->num_rts_success);
7664 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7665 				 "num rx err", vdev->num_rx_err);
7666 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7667 				 "num rx discard", vdev->num_rx_discard);
7668 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7669 				 "num tx not acked", vdev->num_tx_not_acked);
7670 
7671 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7672 			len += scnprintf(buf + len, buf_len - len,
7673 					"%25s [%02d] %u\n",
7674 					"num tx frames", i,
7675 					vdev->num_tx_frames[i]);
7676 
7677 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7678 			len += scnprintf(buf + len, buf_len - len,
7679 					"%25s [%02d] %u\n",
7680 					"num tx frames retries", i,
7681 					vdev->num_tx_frames_retries[i]);
7682 
7683 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7684 			len += scnprintf(buf + len, buf_len - len,
7685 					"%25s [%02d] %u\n",
7686 					"num tx frames failures", i,
7687 					vdev->num_tx_frames_failures[i]);
7688 
7689 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7690 			len += scnprintf(buf + len, buf_len - len,
7691 					"%25s [%02d] 0x%08x\n",
7692 					"tx rate history", i,
7693 					vdev->tx_rate_history[i]);
7694 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7695 			len += scnprintf(buf + len, buf_len - len,
7696 					"%25s [%02d] %u\n",
7697 					"beacon rssi history", i,
7698 					vdev->beacon_rssi_history[i]);
7699 
7700 		len += scnprintf(buf + len, buf_len - len, "\n");
7701 		*length = len;
7702 	}
7703 }
7704 
7705 static void
7706 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7707 			     struct ath12k_fw_stats *fw_stats,
7708 			     char *buf, u32 *length)
7709 {
7710 	const struct ath12k_fw_stats_bcn *bcn;
7711 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7712 	struct ath12k_link_vif *arvif;
7713 	u32 len = *length;
7714 	size_t num_bcn;
7715 
7716 	num_bcn = list_count_nodes(&fw_stats->bcn);
7717 
7718 	len += scnprintf(buf + len, buf_len - len, "\n");
7719 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7720 			 "ath12k Beacon stats", num_bcn);
7721 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7722 			 "===================");
7723 
7724 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7725 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7726 		if (!arvif)
7727 			continue;
7728 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7729 				 "VDEV ID", bcn->vdev_id);
7730 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7731 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7732 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7733 				 "================");
7734 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7735 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7736 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7737 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7738 
7739 		len += scnprintf(buf + len, buf_len - len, "\n");
7740 		*length = len;
7741 	}
7742 }
7743 
7744 static void
7745 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7746 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7747 {
7748 	u32 len = *length;
7749 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7750 
7751 	len += scnprintf(buf + len, buf_len - len, "\n");
7752 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7753 			"ath12k PDEV stats");
7754 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7755 			"=================");
7756 
7757 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7758 			"Channel noise floor", pdev->ch_noise_floor);
7759 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7760 			"Channel TX power", pdev->chan_tx_power);
7761 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7762 			"TX frame count", pdev->tx_frame_count);
7763 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7764 			"RX frame count", pdev->rx_frame_count);
7765 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7766 			"RX clear count", pdev->rx_clear_count);
7767 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7768 			"Cycle count", pdev->cycle_count);
7769 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7770 			"PHY error count", pdev->phy_err_count);
7771 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7772 			"soc drop count", fw_soc_drop_cnt);
7773 
7774 	*length = len;
7775 }
7776 
7777 static void
7778 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7779 				 char *buf, u32 *length)
7780 {
7781 	u32 len = *length;
7782 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7783 
7784 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7785 			 "ath12k PDEV TX stats");
7786 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7787 			 "====================");
7788 
7789 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7790 			 "HTT cookies queued", pdev->comp_queued);
7791 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7792 			 "HTT cookies disp.", pdev->comp_delivered);
7793 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7794 			 "MSDU queued", pdev->msdu_enqued);
7795 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7796 			 "MPDU queued", pdev->mpdu_enqued);
7797 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7798 			 "MSDUs dropped", pdev->wmm_drop);
7799 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7800 			 "Local enqued", pdev->local_enqued);
7801 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7802 			 "Local freed", pdev->local_freed);
7803 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7804 			 "HW queued", pdev->hw_queued);
7805 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7806 			 "PPDUs reaped", pdev->hw_reaped);
7807 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7808 			 "Num underruns", pdev->underrun);
7809 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7810 			 "PPDUs cleaned", pdev->tx_abort);
7811 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7812 			 "MPDUs requeued", pdev->mpdus_requed);
7813 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7814 			 "Excessive retries", pdev->tx_ko);
7815 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7816 			 "HW rate", pdev->data_rc);
7817 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7818 			 "Sched self triggers", pdev->self_triggers);
7819 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7820 			 "Dropped due to SW retries",
7821 			 pdev->sw_retry_failure);
7822 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7823 			 "Illegal rate phy errors",
7824 			 pdev->illgl_rate_phy_err);
7825 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7826 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7827 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7828 			 "TX timeout", pdev->pdev_tx_timeout);
7829 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7830 			 "PDEV resets", pdev->pdev_resets);
7831 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7832 			 "Stateless TIDs alloc failures",
7833 			 pdev->stateless_tid_alloc_failure);
7834 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7835 			 "PHY underrun", pdev->phy_underrun);
7836 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7837 			 "MPDU is more than txop limit", pdev->txop_ovf);
7838 	*length = len;
7839 }
7840 
7841 static void
7842 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7843 				 char *buf, u32 *length)
7844 {
7845 	u32 len = *length;
7846 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7847 
7848 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7849 			 "ath12k PDEV RX stats");
7850 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7851 			 "====================");
7852 
7853 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7854 			 "Mid PPDU route change",
7855 			 pdev->mid_ppdu_route_change);
7856 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7857 			 "Tot. number of statuses", pdev->status_rcvd);
7858 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7859 			 "Extra frags on rings 0", pdev->r0_frags);
7860 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7861 			 "Extra frags on rings 1", pdev->r1_frags);
7862 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7863 			 "Extra frags on rings 2", pdev->r2_frags);
7864 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7865 			 "Extra frags on rings 3", pdev->r3_frags);
7866 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7867 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7868 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7869 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7870 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7871 			 "MSDUs delivered to stack", pdev->loc_msdus);
7872 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7873 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7874 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7875 			 "Oversized AMSUs", pdev->oversize_amsdu);
7876 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7877 			 "PHY errors", pdev->phy_errs);
7878 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7879 			 "PHY errors drops", pdev->phy_err_drop);
7880 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7881 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7882 	*length = len;
7883 }
7884 
7885 static void
7886 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7887 			      struct ath12k_fw_stats *fw_stats,
7888 			      char *buf, u32 *length)
7889 {
7890 	const struct ath12k_fw_stats_pdev *pdev;
7891 	u32 len = *length;
7892 
7893 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7894 					struct ath12k_fw_stats_pdev, list);
7895 	if (!pdev) {
7896 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7897 		return;
7898 	}
7899 
7900 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7901 					   ar->ab->fw_soc_drop_count);
7902 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7903 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7904 
7905 	*length = len;
7906 }
7907 
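/* Render the accumulated fw stats of the given type into buf, which is
 * expected to be ATH12K_FW_STATS_BUF_SIZE bytes; the accumulated stats
 * are reset once dumped.
 */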
7908 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7909 			      struct ath12k_fw_stats *fw_stats,
7910 			      u32 stats_id, char *buf)
7911 {
7912 	u32 len = 0;
7913 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7914 
7915 	spin_lock_bh(&ar->data_lock);
7916 
7917 	switch (stats_id) {
7918 	case WMI_REQUEST_VDEV_STAT:
7919 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7920 		break;
7921 	case WMI_REQUEST_BCN_STAT:
7922 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7923 		break;
7924 	case WMI_REQUEST_PDEV_STAT:
7925 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7926 		break;
7927 	default:
7928 		break;
7929 	}
7930 
7931 	spin_unlock_bh(&ar->data_lock);
7932 
7933 	if (len >= buf_len)
7934 		buf[len - 1] = 0;
7935 	else
7936 		buf[len] = 0;
7937 
7938 	ath12k_fw_stats_reset(ar);
7939 }
7940 
7941 static void
7942 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7943 			   struct ath12k_fw_stats_vdev *dst)
7944 {
7945 	int i;
7946 
7947 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7948 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7949 	dst->data_snr = le32_to_cpu(src->data_snr);
7950 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7951 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7952 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7953 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7954 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7955 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7956 
7957 	for (i = 0; i < WLAN_MAX_AC; i++)
7958 		dst->num_tx_frames[i] =
7959 			le32_to_cpu(src->num_tx_frames[i]);
7960 
7961 	for (i = 0; i < WLAN_MAX_AC; i++)
7962 		dst->num_tx_frames_retries[i] =
7963 			le32_to_cpu(src->num_tx_frames_retries[i]);
7964 
7965 	for (i = 0; i < WLAN_MAX_AC; i++)
7966 		dst->num_tx_frames_failures[i] =
7967 			le32_to_cpu(src->num_tx_frames_failures[i]);
7968 
7969 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7970 		dst->tx_rate_history[i] =
7971 			le32_to_cpu(src->tx_rate_history[i]);
7972 
7973 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7974 		dst->beacon_rssi_history[i] =
7975 			le32_to_cpu(src->beacon_rssi_history[i]);
7976 }
7977 
7978 static void
7979 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7980 			  struct ath12k_fw_stats_bcn *dst)
7981 {
7982 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7983 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7984 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7985 }
7986 
7987 static void
7988 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7989 				struct ath12k_fw_stats_pdev *dst)
7990 {
7991 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7992 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7993 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7994 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7995 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7996 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7997 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7998 }
7999 
8000 static void
8001 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
8002 			      struct ath12k_fw_stats_pdev *dst)
8003 {
8004 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
8005 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
8006 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
8007 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
8008 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
8009 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
8010 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
8011 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
8012 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
8013 	dst->underrun = a_sle32_to_cpu(src->underrun);
8014 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
8015 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
8016 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
8017 	dst->data_rc = __le32_to_cpu(src->data_rc);
8018 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
8019 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
8020 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
8021 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
8022 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
8023 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
8024 	dst->stateless_tid_alloc_failure =
8025 		__le32_to_cpu(src->stateless_tid_alloc_failure);
8026 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
8027 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
8028 }
8029 
8030 static void
8031 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
8032 			      struct ath12k_fw_stats_pdev *dst)
8033 {
8034 	dst->mid_ppdu_route_change =
8035 		a_sle32_to_cpu(src->mid_ppdu_route_change);
8036 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
8037 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
8038 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
8039 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
8040 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
8041 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
8042 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
8043 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
8044 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
8045 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
8046 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
8047 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
8048 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
8049 }
8050 
8051 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
8052 					      struct wmi_tlv_fw_stats_parse *parse,
8053 					      const void *ptr,
8054 					      u16 len)
8055 {
8056 	const struct wmi_stats_event *ev = parse->ev;
8057 	struct ath12k_fw_stats *stats = parse->stats;
8058 	struct ath12k *ar;
8059 	struct ath12k_link_vif *arvif;
8060 	struct ieee80211_sta *sta;
8061 	struct ath12k_sta *ahsta;
8062 	struct ath12k_link_sta *arsta;
8063 	int i, ret = 0;
8064 	const void *data = ptr;
8065 
8066 	if (!ev) {
8067 		ath12k_warn(ab, "failed to fetch update stats ev");
8068 		return -EPROTO;
8069 	}
8070 
8071 	if (!stats)
8072 		return -EINVAL;
8073 
8074 	rcu_read_lock();
8075 
8076 	stats->pdev_id = le32_to_cpu(ev->pdev_id);
8077 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
8078 	if (!ar) {
8079 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
8080 			    le32_to_cpu(ev->pdev_id));
8081 		ret = -EPROTO;
8082 		goto exit;
8083 	}
8084 
8085 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
8086 		const struct wmi_vdev_stats_params *src;
8087 		struct ath12k_fw_stats_vdev *dst;
8088 
8089 		src = data;
8090 		if (len < sizeof(*src)) {
8091 			ret = -EPROTO;
8092 			goto exit;
8093 		}
8094 
8095 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
8096 		if (arvif) {
8097 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
8098 							   arvif->bssid,
8099 							   NULL);
8100 			if (sta) {
8101 				ahsta = ath12k_sta_to_ahsta(sta);
8102 				arsta = &ahsta->deflink;
8103 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
8104 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8105 					   "wmi stats vdev id %d snr %d\n",
8106 					   src->vdev_id, src->beacon_snr);
8107 			} else {
8108 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8109 					   "not found station bssid %pM for vdev stat\n",
8110 					   arvif->bssid);
8111 			}
8112 		}
8113 
8114 		data += sizeof(*src);
8115 		len -= sizeof(*src);
8116 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8117 		if (!dst)
8118 			continue;
8119 		ath12k_wmi_pull_vdev_stats(src, dst);
8120 		stats->stats_id = WMI_REQUEST_VDEV_STAT;
8121 		list_add_tail(&dst->list, &stats->vdevs);
8122 	}
8123 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
8124 		const struct ath12k_wmi_bcn_stats_params *src;
8125 		struct ath12k_fw_stats_bcn *dst;
8126 
8127 		src = data;
8128 		if (len < sizeof(*src)) {
8129 			ret = -EPROTO;
8130 			goto exit;
8131 		}
8132 
8133 		data += sizeof(*src);
8134 		len -= sizeof(*src);
8135 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8136 		if (!dst)
8137 			continue;
8138 		ath12k_wmi_pull_bcn_stats(src, dst);
8139 		stats->stats_id = WMI_REQUEST_BCN_STAT;
8140 		list_add_tail(&dst->list, &stats->bcn);
8141 	}
8142 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
8143 		const struct ath12k_wmi_pdev_stats_params *src;
8144 		struct ath12k_fw_stats_pdev *dst;
8145 
8146 		src = data;
8147 		if (len < sizeof(*src)) {
8148 			ret = -EPROTO;
8149 			goto exit;
8150 		}
8151 
8152 		stats->stats_id = WMI_REQUEST_PDEV_STAT;
8153 
8154 		data += sizeof(*src);
8155 		len -= sizeof(*src);
8156 
8157 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8158 		if (!dst)
8159 			continue;
8160 
8161 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
8162 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
8163 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
8164 		list_add_tail(&dst->list, &stats->pdevs);
8165 	}
8166 
8167 exit:
8168 	rcu_read_unlock();
8169 	return ret;
8170 }
8171 
8172 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
8173 					 u16 tag, u16 len,
8174 					 const void *ptr, void *data)
8175 {
8176 	struct wmi_tlv_fw_stats_parse *parse = data;
8177 	int ret = 0;
8178 
8179 	switch (tag) {
8180 	case WMI_TAG_STATS_EVENT:
8181 		parse->ev = ptr;
8182 		break;
8183 	case WMI_TAG_ARRAY_BYTE:
8184 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
8185 		break;
8186 	default:
8187 		break;
8188 	}
8189 	return ret;
8190 }
8191 
8192 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
8193 				    struct ath12k_fw_stats *stats)
8194 {
8195 	struct wmi_tlv_fw_stats_parse parse = {};
8196 
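	/* stats_id is filled in by the TLV parser according to which stat
	 * structures the event actually carries.
	 */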
8197 	stats->stats_id = 0;
8198 	parse.stats = stats;
8199 
8200 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8201 				   ath12k_wmi_tlv_fw_stats_parse,
8202 				   &parse);
8203 }
8204 
8205 static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
8206 					struct ath12k_fw_stats *stats)
8207 {
8208 	struct ath12k_base *ab = ar->ab;
8209 	struct ath12k_pdev *pdev;
8210 	bool is_end = true;
8211 	size_t total_vdevs_started = 0;
8212 	int i;
8213 
8214 	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
8215 		if (list_empty(&stats->vdevs)) {
8216 			ath12k_warn(ab, "empty vdev stats");
8217 			return;
8218 		}
		/* FW sends stats of all active VDEVs irrespective of PDEV,
		 * hence wait until stats for all started VDEVs have been
		 * received
		 */
8222 		rcu_read_lock();
8223 		for (i = 0; i < ab->num_radios; i++) {
8224 			pdev = rcu_dereference(ab->pdevs_active[i]);
8225 			if (pdev && pdev->ar)
8226 				total_vdevs_started += pdev->ar->num_started_vdevs;
8227 		}
8228 		rcu_read_unlock();
8229 
8230 		if (total_vdevs_started)
8231 			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
8232 				  total_vdevs_started);
8233 
8234 		list_splice_tail_init(&stats->vdevs,
8235 				      &ar->fw_stats.vdevs);
8236 
8237 		if (is_end)
8238 			complete(&ar->fw_stats_done);
8239 
8240 		return;
8241 	}
8242 
8243 	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
8244 		if (list_empty(&stats->bcn)) {
8245 			ath12k_warn(ab, "empty beacon stats");
8246 			return;
8247 		}
		/* Mark the end once stats for all started VDEVs within
		 * the PDEV have been received
		 */
8251 		if (ar->num_started_vdevs)
8252 			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
8253 				  ar->num_started_vdevs);
8254 
8255 		list_splice_tail_init(&stats->bcn,
8256 				      &ar->fw_stats.bcn);
8257 
8258 		if (is_end)
8259 			complete(&ar->fw_stats_done);
8260 	}
8261 }
8262 
8263 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
8264 {
8265 	struct ath12k_fw_stats stats = {};
8266 	struct ath12k *ar;
8267 	int ret;
8268 
8269 	INIT_LIST_HEAD(&stats.pdevs);
8270 	INIT_LIST_HEAD(&stats.vdevs);
8271 	INIT_LIST_HEAD(&stats.bcn);
8272 
8273 	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
8274 	if (ret) {
8275 		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
8276 		goto free;
8277 	}
8278 
8279 	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
8280 
8281 	rcu_read_lock();
8282 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
8283 	if (!ar) {
8284 		rcu_read_unlock();
		ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
			    stats.pdev_id);
8287 		goto free;
8288 	}
8289 
8290 	spin_lock_bh(&ar->data_lock);
8291 
8292 	/* Handle WMI_REQUEST_PDEV_STAT status update */
8293 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
8294 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
8295 		complete(&ar->fw_stats_done);
8296 		goto complete;
8297 	}
8298 
8299 	/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
8300 	ath12k_wmi_fw_stats_process(ar, &stats);
8301 
8302 complete:
8303 	complete(&ar->fw_stats_complete);
8304 	spin_unlock_bh(&ar->data_lock);
8305 	rcu_read_unlock();
8306 
	/* Since the stats' pdev, vdev and beacon lists are spliced and
	 * reinitialised at this point, there is no need to free the
	 * individual lists.
	 */
8310 	return;
8311 
8312 free:
8313 	ath12k_fw_stats_free(&stats);
8314 }
8315 
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned frequency
 * is not part of the BDF CTL (Conformance Test Limits) table entries.
 */
8318  */
8319 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8320 						 struct sk_buff *skb)
8321 {
8322 	const void **tb;
8323 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8324 	int ret;
8325 
8326 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8327 	if (IS_ERR(tb)) {
8328 		ret = PTR_ERR(tb);
8329 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8330 		return;
8331 	}
8332 
8333 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8334 	if (!ev) {
8335 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8336 		kfree(tb);
8337 		return;
8338 	}
8339 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev ctl failsafe check ev status %d\n",
		   le32_to_cpu(ev->ctl_failsafe_status));

	/* If ctl_failsafe_status is set to 1, the FW caps the transmit power
	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
	 */
	if (ev->ctl_failsafe_status != 0)
		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
			    le32_to_cpu(ev->ctl_failsafe_status));
8350 
8351 	kfree(tb);
8352 }
8353 
8354 static void
8355 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8356 					  const struct ath12k_wmi_pdev_csa_event *ev,
8357 					  const u32 *vdev_ids)
8358 {
8359 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8360 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8361 	struct ieee80211_bss_conf *conf;
8362 	struct ath12k_link_vif *arvif;
8363 	struct ath12k_vif *ahvif;
8364 	int i;
8365 
8366 	rcu_read_lock();
8367 	for (i = 0; i < num_vdevs; i++) {
8368 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8369 
8370 		if (!arvif) {
8371 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
8372 				    vdev_ids[i]);
8373 			continue;
8374 		}
8375 		ahvif = arvif->ahvif;
8376 
8377 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
			ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
8379 				    arvif->link_id);
8380 			continue;
8381 		}
8382 
8383 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8384 		if (!conf) {
8385 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8386 				    ahvif->vif->addr, arvif->link_id);
8387 			continue;
8388 		}
8389 
8390 		if (!arvif->is_up || !conf->csa_active)
8391 			continue;
8392 
8393 		/* Finish CSA when counter reaches zero */
8394 		if (!current_switch_count) {
8395 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8396 			arvif->current_cntdown_counter = 0;
8397 		} else if (current_switch_count > 1) {
			/* If the count in the event is not what we expect,
			 * don't update the mac80211 count: on beacon Tx
			 * failure the firmware does not decrement its count,
			 * so this event carries the previous count value
			 * again.
			 */
8403 			if (current_switch_count != arvif->current_cntdown_counter)
8404 				continue;
8405 
8406 			arvif->current_cntdown_counter =
8407 				ieee80211_beacon_update_cntdwn(ahvif->vif,
8408 							       arvif->link_id);
8409 		}
8410 	}
8411 	rcu_read_unlock();
8412 }
8413 
8414 static void
8415 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
8416 					      struct sk_buff *skb)
8417 {
8418 	const void **tb;
8419 	const struct ath12k_wmi_pdev_csa_event *ev;
8420 	const u32 *vdev_ids;
8421 	int ret;
8422 
8423 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8424 	if (IS_ERR(tb)) {
8425 		ret = PTR_ERR(tb);
8426 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8427 		return;
8428 	}
8429 
8430 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
8431 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
8432 
8433 	if (!ev || !vdev_ids) {
8434 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
8435 		kfree(tb);
8436 		return;
8437 	}
8438 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   le32_to_cpu(ev->current_switch_count),
		   le32_to_cpu(ev->pdev_id),
		   le32_to_cpu(ev->num_vdevs));
8443 
8444 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
8445 
8446 	kfree(tb);
8447 }
8448 
8449 static void
8450 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
8451 {
8452 	const void **tb;
8453 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
8454 	const struct ath12k_wmi_pdev_radar_event *ev;
8455 	struct ath12k *ar;
8456 	int ret;
8457 
8458 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8459 	if (IS_ERR(tb)) {
8460 		ret = PTR_ERR(tb);
8461 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8462 		return;
8463 	}
8464 
8465 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
8466 
8467 	if (!ev) {
8468 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
8469 		kfree(tb);
8470 		return;
8471 	}
8472 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->detection_mode),
		   le32_to_cpu(ev->chan_freq), le32_to_cpu(ev->chan_width),
		   le32_to_cpu(ev->detector_id), le32_to_cpu(ev->segment_id),
		   le32_to_cpu(ev->timestamp), le32_to_cpu(ev->is_chirp),
		   a_sle32_to_cpu(ev->freq_offset), a_sle32_to_cpu(ev->sidx));
8478 
8479 	rcu_read_lock();
8480 
8481 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
8482 
8483 	if (!ar) {
		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
			    le32_to_cpu(ev->pdev_id));
8486 		goto exit;
8487 	}
8488 
8489 	arg.ar = ar;
8490 	arg.chanctx_conf = NULL;
8491 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
8492 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
8493 	if (!arg.chanctx_conf) {
8494 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
8495 		goto exit;
8496 	}
8497 
	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   le32_to_cpu(ev->pdev_id));
8500 
8501 	if (ar->dfs_block_radar_events)
8502 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
8503 	else
8504 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
8505 
8506 exit:
8507 	rcu_read_unlock();
8508 
8509 	kfree(tb);
8510 }
8511 
8512 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
8513 					  struct sk_buff *skb)
8514 {
8515 	const struct ath12k_wmi_ftm_event *ev;
8516 	const void **tb;
8517 	int ret;
8518 	u16 length;
8519 
8520 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8521 
8522 	if (IS_ERR(tb)) {
8523 		ret = PTR_ERR(tb);
8524 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
8525 		return;
8526 	}
8527 
8528 	ev = tb[WMI_TAG_ARRAY_BYTE];
8529 	if (!ev) {
8530 		ath12k_warn(ab, "failed to fetch ftm msg\n");
8531 		kfree(tb);
8532 		return;
8533 	}
8534 
8535 	length = skb->len - TLV_HDR_SIZE;
8536 	ath12k_tm_process_event(ab, cmd_id, ev, length);
	kfree(tb);
8539 }
8540 
8541 static void
8542 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
8543 				  struct sk_buff *skb)
8544 {
8545 	struct ath12k *ar;
8546 	struct wmi_pdev_temperature_event ev = {0};
8547 
8548 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
8549 		ath12k_warn(ab, "failed to extract pdev temperature event");
8550 		return;
8551 	}
8552 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n",
		   a_sle32_to_cpu(ev.temp), le32_to_cpu(ev.pdev_id));
8555 
8556 	rcu_read_lock();
8557 
8558 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
8559 	if (!ar) {
		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d",
			    le32_to_cpu(ev.pdev_id));
8561 		goto exit;
8562 	}
8563 
8564 exit:
8565 	rcu_read_unlock();
8566 }
8567 
8568 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
8569 					struct sk_buff *skb)
8570 {
8571 	const void **tb;
8572 	const struct wmi_fils_discovery_event *ev;
8573 	int ret;
8574 
8575 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8576 	if (IS_ERR(tb)) {
8577 		ret = PTR_ERR(tb);
8578 		ath12k_warn(ab,
8579 			    "failed to parse FILS discovery event tlv %d\n",
8580 			    ret);
8581 		return;
8582 	}
8583 
8584 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
8585 	if (!ev) {
8586 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
8587 		kfree(tb);
8588 		return;
8589 	}
8590 
	ath12k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    le32_to_cpu(ev->vdev_id), le32_to_cpu(ev->fils_tt),
		    le32_to_cpu(ev->tbtt));
8594 
8595 	kfree(tb);
8596 }
8597 
8598 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
8599 					      struct sk_buff *skb)
8600 {
8601 	const void **tb;
8602 	const struct wmi_probe_resp_tx_status_event *ev;
8603 	int ret;
8604 
8605 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8606 	if (IS_ERR(tb)) {
8607 		ret = PTR_ERR(tb);
8608 		ath12k_warn(ab,
8609 			    "failed to parse probe response transmission status event tlv: %d\n",
8610 			    ret);
8611 		return;
8612 	}
8613 
8614 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
8615 	if (!ev) {
8616 		ath12k_warn(ab,
8617 			    "failed to fetch probe response transmission status event");
8618 		kfree(tb);
8619 		return;
8620 	}
8621 
	if (ev->tx_status)
		ath12k_warn(ab,
			    "Probe response transmission failed for vdev_id %u, status %u\n",
			    le32_to_cpu(ev->vdev_id),
			    le32_to_cpu(ev->tx_status));
8626 
8627 	kfree(tb);
8628 }
8629 
8630 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
8631 				    struct sk_buff *skb)
8632 {
8633 	const void **tb;
8634 	const struct wmi_p2p_noa_event *ev;
8635 	const struct ath12k_wmi_p2p_noa_info *noa;
8636 	struct ath12k *ar;
8637 	int ret, vdev_id;
8638 
8639 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8640 	if (IS_ERR(tb)) {
8641 		ret = PTR_ERR(tb);
8642 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
8643 		return ret;
8644 	}
8645 
8646 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
8647 	noa = tb[WMI_TAG_P2P_NOA_INFO];
8648 
8649 	if (!ev || !noa) {
8650 		ret = -EPROTO;
8651 		goto out;
8652 	}
8653 
8654 	vdev_id = __le32_to_cpu(ev->vdev_id);
8655 
8656 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8657 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
8658 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
8659 
8660 	rcu_read_lock();
8661 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
8662 	if (!ar) {
8663 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
8664 			    vdev_id);
8665 		ret = -EINVAL;
8666 		goto unlock;
8667 	}
8668 
8669 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
8670 
8671 	ret = 0;
8672 
8673 unlock:
8674 	rcu_read_unlock();
8675 out:
8676 	kfree(tb);
8677 	return ret;
8678 }
8679 
8680 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
8681 					     struct sk_buff *skb)
8682 {
8683 	const struct wmi_rfkill_state_change_event *ev;
8684 	const void **tb;
8685 	int ret;
8686 
8687 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8688 	if (IS_ERR(tb)) {
8689 		ret = PTR_ERR(tb);
8690 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8691 		return;
8692 	}
8693 
8694 	ev = tb[WMI_TAG_RFKILL_EVENT];
8695 	if (!ev) {
8696 		kfree(tb);
8697 		return;
8698 	}
8699 
8700 	ath12k_dbg(ab, ATH12K_DBG_MAC,
8701 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
8702 		   le32_to_cpu(ev->gpio_pin_num),
8703 		   le32_to_cpu(ev->int_type),
8704 		   le32_to_cpu(ev->radio_state));
8705 
8706 	spin_lock_bh(&ab->base_lock);
8707 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
8708 	spin_unlock_bh(&ab->base_lock);
8709 
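	/* Defer the actual rfkill handling to process context via the
	 * rfkill worker.
	 */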
8710 	queue_work(ab->workqueue, &ab->rfkill_work);
8711 	kfree(tb);
8712 }
8713 
8714 static void
8715 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
8716 {
8717 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
8718 }
8719 
8720 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
8721 					struct sk_buff *skb)
8722 {
8723 	const void **tb;
8724 	const struct wmi_twt_enable_event *ev;
8725 	int ret;
8726 
8727 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8728 	if (IS_ERR(tb)) {
8729 		ret = PTR_ERR(tb);
8730 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
8731 			    ret);
8732 		return;
8733 	}
8734 
8735 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
8736 	if (!ev) {
8737 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
8738 		goto exit;
8739 	}
8740 
8741 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
8742 		   le32_to_cpu(ev->pdev_id),
8743 		   le32_to_cpu(ev->status));
8744 
8745 exit:
8746 	kfree(tb);
8747 }
8748 
8749 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
8750 					 struct sk_buff *skb)
8751 {
8752 	const void **tb;
8753 	const struct wmi_twt_disable_event *ev;
8754 	int ret;
8755 
8756 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8757 	if (IS_ERR(tb)) {
8758 		ret = PTR_ERR(tb);
8759 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
8760 			    ret);
8761 		return;
8762 	}
8763 
8764 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
8765 	if (!ev) {
8766 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
8767 		goto exit;
8768 	}
8769 
8770 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
8771 		   le32_to_cpu(ev->pdev_id),
8772 		   le32_to_cpu(ev->status));
8773 
8774 exit:
8775 	kfree(tb);
8776 }
8777 
8778 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
8779 					    u16 tag, u16 len,
8780 					    const void *ptr, void *data)
8781 {
8782 	const struct wmi_wow_ev_pg_fault_param *pf_param;
8783 	const struct wmi_wow_ev_param *param;
8784 	struct wmi_wow_ev_arg *arg = data;
8785 	int pf_len;
8786 
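	/* The WMI_TAG_WOW_EVENT_INFO TLV is expected to precede
	 * WMI_TAG_ARRAY_BYTE so that wake_reason is already known when the
	 * page fault payload is parsed.
	 */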
8787 	switch (tag) {
8788 	case WMI_TAG_WOW_EVENT_INFO:
8789 		param = ptr;
8790 		arg->wake_reason = le32_to_cpu(param->wake_reason);
8791 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
8792 			   arg->wake_reason, wow_reason(arg->wake_reason));
8793 		break;
8794 
8795 	case WMI_TAG_ARRAY_BYTE:
8796 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
8797 			pf_param = ptr;
8798 			pf_len = le32_to_cpu(pf_param->len);
8799 			if (pf_len > len - sizeof(pf_len) ||
8800 			    pf_len < 0) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    pf_len);
8803 				return -EINVAL;
8804 			}
8805 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
8806 				   pf_len);
8807 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
8808 					"wow_reason_page_fault packet present",
8809 					"wow_pg_fault ",
8810 					pf_param->data,
8811 					pf_len);
8812 		}
8813 		break;
8814 	default:
8815 		break;
8816 	}
8817 
8818 	return 0;
8819 }
8820 
8821 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
8822 {
8823 	struct wmi_wow_ev_arg arg = { };
8824 	int ret;
8825 
8826 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8827 				  ath12k_wmi_wow_wakeup_host_parse,
8828 				  &arg);
8829 	if (ret) {
8830 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8831 			    ret);
8832 		return;
8833 	}
8834 
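	/* Unblock the resume path waiting on wow.wakeup_completed */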
8835 	complete(&ab->wow.wakeup_completed);
8836 }
8837 
8838 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8839 						struct sk_buff *skb)
8840 {
8841 	const struct wmi_gtk_offload_status_event *ev;
8842 	struct ath12k_link_vif *arvif;
8843 	__be64 replay_ctr_be;
8844 	u64 replay_ctr;
8845 	const void **tb;
8846 	int ret;
8847 
8848 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8849 	if (IS_ERR(tb)) {
8850 		ret = PTR_ERR(tb);
8851 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8852 		return;
8853 	}
8854 
8855 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8856 	if (!ev) {
8857 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8858 		kfree(tb);
8859 		return;
8860 	}
8861 
8862 	rcu_read_lock();
8863 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8864 	if (!arvif) {
8865 		rcu_read_unlock();
8866 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8867 			    le32_to_cpu(ev->vdev_id));
8868 		kfree(tb);
8869 		return;
8870 	}
8871 
8872 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8873 	arvif->rekey_data.replay_ctr = replay_ctr;
8874 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8875 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8876 
8877 	/* supplicant expects big-endian replay counter */
8878 	replay_ctr_be = cpu_to_be64(replay_ctr);
8879 
8880 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8881 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8882 
8883 	rcu_read_unlock();
8884 
8885 	kfree(tb);
8886 }
8887 
8888 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8889 						struct sk_buff *skb)
8890 {
8891 	const struct wmi_mlo_setup_complete_event *ev;
8892 	struct ath12k *ar = NULL;
8893 	struct ath12k_pdev *pdev;
8894 	const void **tb;
8895 	int ret, i;
8896 
8897 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8898 	if (IS_ERR(tb)) {
8899 		ret = PTR_ERR(tb);
8900 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8901 			    ret);
8902 		return;
8903 	}
8904 
8905 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8906 	if (!ev) {
8907 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8908 		kfree(tb);
8909 		return;
8910 	}
8911 
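	/* A pdev id above num_radios cannot match any radio; skip the
	 * lookup and fall through to the error path below.
	 */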
8912 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8913 		goto skip_lookup;
8914 
8915 	for (i = 0; i < ab->num_radios; i++) {
8916 		pdev = &ab->pdevs[i];
8917 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8918 			ar = pdev->ar;
8919 			break;
8920 		}
8921 	}
8922 
8923 skip_lookup:
	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
8927 		goto out;
8928 	}
8929 
8930 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8931 	complete(&ar->mlo_setup_done);
8932 
8933 out:
8934 	kfree(tb);
8935 }
8936 
8937 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8938 					       struct sk_buff *skb)
8939 {
8940 	const struct wmi_mlo_teardown_complete_event *ev;
8941 	const void **tb;
8942 	int ret;
8943 
8944 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8945 	if (IS_ERR(tb)) {
8946 		ret = PTR_ERR(tb);
8947 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8948 		return;
8949 	}
8950 
8951 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8952 	if (!ev) {
8953 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8954 		kfree(tb);
8955 		return;
8956 	}
8957 
8958 	kfree(tb);
8959 }
8960 
8961 #ifdef CONFIG_ATH12K_DEBUGFS
8962 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8963 					    const void *ptr, u16 tag, u16 len,
8964 					    struct wmi_tpc_stats_arg *tpc_stats)
8965 {
8966 	u32 len1, len2, len3, len4;
8967 	s16 *dst_ptr;
8968 	s8 *dst_ptr_ctl;
8969 
8970 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8971 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8972 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8973 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8974 
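	/* event_count of the current event identifies which array the
	 * WMI_TAG_ARRAY_* payload belongs to; the destination buffers were
	 * allocated while parsing the matching fixed-param TLVs.
	 */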
8975 	switch (tpc_stats->event_count) {
8976 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8977 		if (len1 > len)
8978 			return -ENOBUFS;
8979 
8980 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8981 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8982 			memcpy(dst_ptr, ptr, len1);
8983 		}
8984 		break;
8985 	case ATH12K_TPC_STATS_RATES_EVENT1:
8986 		if (len2 > len)
8987 			return -ENOBUFS;
8988 
8989 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8990 			dst_ptr = tpc_stats->rates_array1.rate_array;
8991 			memcpy(dst_ptr, ptr, len2);
8992 		}
8993 		break;
8994 	case ATH12K_TPC_STATS_RATES_EVENT2:
8995 		if (len3 > len)
8996 			return -ENOBUFS;
8997 
8998 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
8999 			dst_ptr = tpc_stats->rates_array2.rate_array;
9000 			memcpy(dst_ptr, ptr, len3);
9001 		}
9002 		break;
9003 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
9004 		if (len4 > len)
9005 			return -ENOBUFS;
9006 
9007 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
9008 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
9009 			memcpy(dst_ptr_ctl, ptr, len4);
9010 		}
9011 		break;
9012 	}
9013 	return 0;
9014 }
9015 
9016 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
9017 				  struct wmi_tpc_stats_arg *tpc_stats,
9018 				  struct wmi_max_reg_power_fixed_params *ev)
9019 {
9020 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
9021 	u32 total_size;
9022 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received reg power array type %d length %d for tpc stats\n",
		   le32_to_cpu(ev->reg_power_type),
		   le32_to_cpu(ev->reg_array_len));
9026 
9027 	switch (le32_to_cpu(ev->reg_power_type)) {
9028 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
9029 		reg_pwr = &tpc_stats->max_reg_allowed_power;
9030 		break;
9031 	default:
9032 		return -EINVAL;
9033 	}
9034 
	/* Each entry is 2 bytes wide, hence the product of the array
	 * dimensions is multiplied by 2
	 */
9036 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9037 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
9038 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
9039 		ath12k_warn(ab,
9040 			    "Total size and reg_array_len doesn't match for tpc stats\n");
9041 		return -EINVAL;
9042 	}
9043 
9044 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
9045 
9046 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
9047 					 GFP_ATOMIC);
9048 	if (!reg_pwr->reg_pwr_array)
9049 		return -ENOMEM;
9050 
9051 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
9052 
9053 	return 0;
9054 }
9055 
9056 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
9057 				     struct wmi_tpc_stats_arg *tpc_stats,
9058 				     struct wmi_tpc_rates_array_fixed_params *ev)
9059 {
9060 	struct wmi_tpc_rates_array_arg *rates_array;
9061 	u32 flag = 0, rate_array_len;
9062 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received rates array type %d length %d for tpc stats\n",
		   le32_to_cpu(ev->rate_array_type),
		   le32_to_cpu(ev->rate_array_len));
9066 
9067 	switch (le32_to_cpu(ev->rate_array_type)) {
9068 	case ATH12K_TPC_STATS_RATES_ARRAY1:
9069 		rates_array = &tpc_stats->rates_array1;
9070 		flag = WMI_TPC_RATES_ARRAY1;
9071 		break;
9072 	case ATH12K_TPC_STATS_RATES_ARRAY2:
9073 		rates_array = &tpc_stats->rates_array2;
9074 		flag = WMI_TPC_RATES_ARRAY2;
9075 		break;
9076 	default:
9077 		ath12k_warn(ab,
9078 			    "Received invalid type of rates array for tpc stats\n");
9079 		return -EINVAL;
9080 	}
9081 	memcpy(&rates_array->tpc_rates_array, ev,
9082 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
9083 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
9084 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
9085 	if (!rates_array->rate_array)
9086 		return -ENOMEM;
9087 
9088 	tpc_stats->tlvs_rcvd |= flag;
9089 	return 0;
9090 }
9091 
9092 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
9093 				      struct wmi_tpc_stats_arg *tpc_stats,
9094 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
9095 {
9096 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
9097 	u32 total_size, ctl_array_len, flag = 0;
9098 
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received ctl array type %d length %d for tpc stats\n",
		   le32_to_cpu(ev->ctl_array_type),
		   le32_to_cpu(ev->ctl_array_len));
9102 
9103 	switch (le32_to_cpu(ev->ctl_array_type)) {
9104 	case ATH12K_TPC_STATS_CTL_ARRAY:
9105 		ctl_array = &tpc_stats->ctl_array;
9106 		flag = WMI_TPC_CTL_PWR_ARRAY;
9107 		break;
9108 	default:
9109 		ath12k_warn(ab,
9110 			    "Received invalid type of ctl pwr table for tpc stats\n");
9111 		return -EINVAL;
9112 	}
9113 
9114 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9115 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
9116 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
9117 		ath12k_warn(ab,
9118 			    "Total size and ctl_array_len doesn't match for tpc stats\n");
9119 		return -EINVAL;
9120 	}
9121 
9122 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
9123 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
9124 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
9125 	if (!ctl_array->ctl_pwr_table)
9126 		return -ENOMEM;
9127 
9128 	tpc_stats->tlvs_rcvd |= flag;
9129 	return 0;
9130 }
9131 
9132 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
9133 					      u16 tag, u16 len,
9134 					      const void *ptr, void *data)
9135 {
9136 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
9137 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
9138 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
9139 	struct wmi_tpc_stats_arg *tpc_stats = data;
9140 	struct wmi_tpc_config_params *tpc_config;
9141 	int ret = 0;
9142 
9143 	if (!tpc_stats) {
9144 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9145 		return -EINVAL;
9146 	}
9147 
9148 	switch (tag) {
9149 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
9150 		tpc_config = (struct wmi_tpc_config_params *)ptr;
9151 		memcpy(&tpc_stats->tpc_config, tpc_config,
9152 		       sizeof(struct wmi_tpc_config_params));
9153 		break;
9154 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
9155 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
9156 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
9157 		break;
9158 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
9159 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
9160 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
9161 		break;
9162 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
9163 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
9164 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
9165 		break;
9166 	default:
9167 		ath12k_warn(ab,
9168 			    "Received invalid tag for tpc stats in subtlvs\n");
9169 		return -EINVAL;
9170 	}
9171 	return ret;
9172 }
9173 
9174 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
9175 					     u16 tag, u16 len,
9176 					     const void *ptr, void *data)
9177 {
9178 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
9179 	int ret;
9180 
9181 	switch (tag) {
9182 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
		ret = 0;
		/* Fixed param is already processed */
9185 		break;
9186 	case WMI_TAG_ARRAY_STRUCT:
9187 		/* len 0 is expected for array of struct when there
9188 		 * is no content of that type to pack inside that tlv
9189 		 */
9190 		if (len == 0)
9191 			return 0;
9192 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9193 					  ath12k_wmi_tpc_stats_subtlv_parser,
9194 					  tpc_stats);
9195 		break;
9196 	case WMI_TAG_ARRAY_INT16:
9197 		if (len == 0)
9198 			return 0;
9199 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9200 						       WMI_TAG_ARRAY_INT16,
9201 						       len, tpc_stats);
9202 		break;
9203 	case WMI_TAG_ARRAY_BYTE:
9204 		if (len == 0)
9205 			return 0;
9206 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9207 						       WMI_TAG_ARRAY_BYTE,
9208 						       len, tpc_stats);
9209 		break;
9210 	default:
9211 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
9212 		ret = -EINVAL;
9213 		break;
9214 	}
9215 	return ret;
9216 }
9217 
9218 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
9219 {
9220 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
9221 
9222 	lockdep_assert_held(&ar->data_lock);
9223 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
9224 	if (tpc_stats) {
9225 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
9226 		kfree(tpc_stats->rates_array1.rate_array);
9227 		kfree(tpc_stats->rates_array2.rate_array);
9228 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
9229 		kfree(tpc_stats);
9230 		ar->debug.tpc_stats = NULL;
9231 	}
9232 }
9233 
9234 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9235 					 struct sk_buff *skb)
9236 {
9237 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
9238 	struct wmi_tpc_stats_arg *tpc_stats;
9239 	const struct wmi_tlv *tlv;
9240 	void *ptr = skb->data;
9241 	struct ath12k *ar;
9242 	u16 tlv_tag;
9243 	u32 event_count;
9244 	int ret;
9245 
9246 	if (!skb->data) {
9247 		ath12k_warn(ab, "No data present in tpc stats event\n");
9248 		return;
9249 	}
9250 
9251 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9252 		ath12k_warn(ab, "TPC stats event size invalid\n");
9253 		return;
9254 	}
9255 
9256 	tlv = (struct wmi_tlv *)ptr;
9257 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9258 	ptr += sizeof(*tlv);
9259 
9260 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
9261 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
9262 		return;
9263 	}
9264 
9265 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
9266 	rcu_read_lock();
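	/* Note: the fixed param carries a 0-based pdev id here while host
	 * pdev ids are 1-based, hence the +1 in the lookup.
	 */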
9267 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
9268 	if (!ar) {
9269 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
9270 		rcu_read_unlock();
9271 		return;
9272 	}
9273 	spin_lock_bh(&ar->data_lock);
9274 	if (!ar->debug.tpc_request) {
		/* Event is received either without a request or after the
		 * timeout; if memory is already allocated, free it
		 */
9278 		if (ar->debug.tpc_stats) {
9279 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
9280 			ath12k_wmi_free_tpc_stats_mem(ar);
9281 		}
9282 		goto unlock;
9283 	}
9284 
9285 	event_count = le32_to_cpu(fixed_param->event_count);
9286 	if (event_count == 0) {
9287 		if (ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Stale tpc stats memory already present\n");
9290 			goto unlock;
9291 		}
9292 		ar->debug.tpc_stats =
9293 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
9294 				GFP_ATOMIC);
9295 		if (!ar->debug.tpc_stats) {
9296 			ath12k_warn(ab,
9297 				    "Failed to allocate memory for tpc stats\n");
9298 			goto unlock;
9299 		}
9300 	}
9301 
9302 	tpc_stats = ar->debug.tpc_stats;
9303 	if (!tpc_stats) {
9304 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9305 		goto unlock;
9306 	}
9307 
	if (event_count != 0 &&
	    event_count != tpc_stats->event_count + 1) {
		ath12k_warn(ab,
			    "Invalid tpc event received\n");
		goto unlock;
	}
9315 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
9316 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
9317 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
9318 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9319 		   "tpc stats event_count %d\n",
9320 		   tpc_stats->event_count);
9321 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9322 				  ath12k_wmi_tpc_stats_event_parser,
9323 				  tpc_stats);
9324 	if (ret) {
9325 		ath12k_wmi_free_tpc_stats_mem(ar);
9326 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
9327 		goto unlock;
9328 	}
9329 
9330 	if (tpc_stats->end_of_event)
9331 		complete(&ar->debug.tpc_complete);
9332 
9333 unlock:
9334 	spin_unlock_bh(&ar->data_lock);
9335 	rcu_read_unlock();
9336 }
9337 #else
9338 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9339 					 struct sk_buff *skb)
9340 {
9341 }
9342 #endif
9343 
9344 static int
9345 ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
9346 						u16 tag, u16 len,
9347 						const void *ptr, void *data)
9348 {
9349 	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
9350 	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
9351 	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
9352 	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
9353 	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
9354 	u8 num_20mhz_segments;
9355 	s8 min_nf, *nf_ptr;
9356 	int i, j;
9357 
9358 	switch (tag) {
9359 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
9360 		if (len < sizeof(*param_info)) {
9361 			ath12k_warn(ab,
9362 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9363 				    tag, len);
9364 			return -EINVAL;
9365 		}
9366 
9367 		param_info = ptr;
9368 
9369 		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
9370 		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);
9371 
		/* The received array packs the per chain, per 20 MHz subband
		 * noise floor values as bytes inside 32-bit words; unpack it
		 * into the 2D byte-array param_arg.nf_hw_dbm[chain][subband]
		 */
9375 		nf_ptr = &param_arg.nf_hw_dbm[0][0];
9376 
9377 		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
9378 			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);
9379 
9380 			for (j = 0; j < 4; j++) {
9381 				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
9382 				nf_ptr++;
9383 			}
9384 		}
9385 
9386 		switch (param_arg.curr_bw) {
9387 		case WMI_CHAN_WIDTH_20:
9388 			num_20mhz_segments = 1;
9389 			break;
9390 		case WMI_CHAN_WIDTH_40:
9391 			num_20mhz_segments = 2;
9392 			break;
9393 		case WMI_CHAN_WIDTH_80:
9394 			num_20mhz_segments = 4;
9395 			break;
9396 		case WMI_CHAN_WIDTH_160:
9397 			num_20mhz_segments = 8;
9398 			break;
9399 		case WMI_CHAN_WIDTH_320:
9400 			num_20mhz_segments = 16;
9401 			break;
9402 		default:
9403 			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
9404 				    param_arg.curr_bw);
			/* In the error case, still consider the primary
			 * 20 MHz segment since that is much better than
			 * dropping the whole event
			 */
9409 			num_20mhz_segments = 1;
9410 		}
9411 
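		/* Take the minimum noise floor across all active chains and
		 * in-use 20 MHz segments, starting from the default value.
		 */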
9412 		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;
9413 
9414 		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
9415 			if (!(param_arg.curr_rx_chainmask & BIT(i)))
9416 				continue;
9417 
9418 			for (j = 0; j < num_20mhz_segments; j++) {
9419 				if (param_arg.nf_hw_dbm[i][j] < min_nf)
9420 					min_nf = param_arg.nf_hw_dbm[i][j];
9421 			}
9422 		}
9423 
9424 		rssi_info->min_nf_dbm = min_nf;
9425 		rssi_info->nf_dbm_present = true;
9426 		break;
9427 	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
9428 		if (len < sizeof(*temp_info)) {
9429 			ath12k_warn(ab,
9430 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9431 				    tag, len);
9432 			return -EINVAL;
9433 		}
9434 
9435 		temp_info = ptr;
9436 		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
9437 		rssi_info->temp_offset_present = true;
9438 		break;
9439 	default:
9440 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9441 			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
9442 	}
9443 
9444 	return 0;
9445 }
9446 
9447 static int
9448 ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
9449 					   u16 tag, u16 len,
9450 					   const void *ptr, void *data)
9451 {
9452 	int ret = 0;
9453 
9454 	switch (tag) {
9455 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
		/* Fixed param is already processed */
9457 		break;
9458 	case WMI_TAG_ARRAY_STRUCT:
9459 		/* len 0 is expected for array of struct when there
9460 		 * is no content of that type inside that tlv
9461 		 */
9462 		if (len == 0)
9463 			return 0;
9464 
9465 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9466 					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
9467 					  data);
9468 		break;
9469 	default:
9470 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9471 			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
9472 			   tag);
9473 		break;
9474 	}
9475 
9476 	return ret;
9477 }
9478 
9479 static int
9480 ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
9481 						  size_t len, int *pdev_id)
9482 {
9483 	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
9484 	const struct wmi_tlv *tlv;
9485 	u16 tlv_tag;
9486 
9487 	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9488 		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
9489 		return -EINVAL;
9490 	}
9491 
9492 	tlv = (struct wmi_tlv *)ptr;
9493 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9494 	ptr += sizeof(*tlv);
9495 
9496 	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
9497 		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
9498 		return -EINVAL;
9499 	}
9500 
9501 	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
9502 	*pdev_id = le32_to_cpu(fixed_param->pdev_id);
9503 
9504 	return 0;
9505 }
9506 
9507 static void
9508 ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
9509 			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
9510 {
9511 	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;
9512 
9513 	lockdep_assert_held(&ar->data_lock);
9514 
9515 	if (rssi_info->temp_offset_present)
9516 		info->temp_offset = rssi_info->temp_offset;
9517 
9518 	if (rssi_info->nf_dbm_present)
9519 		info->min_nf_dbm = rssi_info->min_nf_dbm;
9520 
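	/* The effective noise floor is the minimum hardware NF adjusted by
	 * the temperature based offset.
	 */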
9521 	info->noise_floor = info->min_nf_dbm + info->temp_offset;
9522 }
9523 
9524 static void
9525 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
9526 						 struct sk_buff *skb)
9527 {
	struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info = {};
9529 	struct ath12k *ar;
9530 	s32 noise_floor;
9531 	u32 pdev_id;
9532 	int ret;
9533 
9534 	ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
9535 								&pdev_id);
9536 	if (ret) {
9537 		ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
9538 			    ret);
9539 		return;
9540 	}
9541 
9542 	rcu_read_lock();
9543 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
9544 	/* If pdev is not active, ignore the event */
9545 	if (!ar)
9546 		goto out_unlock;
9547 
9548 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9549 				  ath12k_wmi_rssi_dbm_conv_info_event_parser,
9550 				  &rssi_info);
9551 	if (ret) {
9552 		ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
9553 		goto out_unlock;
9554 	}
9555 
9556 	spin_lock_bh(&ar->data_lock);
9557 	ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
9558 	noise_floor = ath12k_pdev_get_noise_floor(ar);
9559 	spin_unlock_bh(&ar->data_lock);
9560 
9561 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9562 		   "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
9563 out_unlock:
9564 	rcu_read_unlock();
9565 }
9566 
9567 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
9568 {
9569 	struct wmi_cmd_hdr *cmd_hdr;
9570 	enum wmi_tlv_event_id id;
9571 
9572 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
9573 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
9574 
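	/* Strip the WMI command header; the rest of the skb is the
	 * event-specific TLV payload.
	 */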
9575 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
9576 		goto out;
9577 
9578 	switch (id) {
9579 		/* Process all the WMI events here */
9580 	case WMI_SERVICE_READY_EVENTID:
9581 		ath12k_service_ready_event(ab, skb);
9582 		break;
9583 	case WMI_SERVICE_READY_EXT_EVENTID:
9584 		ath12k_service_ready_ext_event(ab, skb);
9585 		break;
9586 	case WMI_SERVICE_READY_EXT2_EVENTID:
9587 		ath12k_service_ready_ext2_event(ab, skb);
9588 		break;
9589 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
9590 		ath12k_reg_chan_list_event(ab, skb);
9591 		break;
9592 	case WMI_READY_EVENTID:
9593 		ath12k_ready_event(ab, skb);
9594 		break;
9595 	case WMI_PEER_DELETE_RESP_EVENTID:
9596 		ath12k_peer_delete_resp_event(ab, skb);
9597 		break;
9598 	case WMI_VDEV_START_RESP_EVENTID:
9599 		ath12k_vdev_start_resp_event(ab, skb);
9600 		break;
9601 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
9602 		ath12k_bcn_tx_status_event(ab, skb);
9603 		break;
9604 	case WMI_VDEV_STOPPED_EVENTID:
9605 		ath12k_vdev_stopped_event(ab, skb);
9606 		break;
9607 	case WMI_MGMT_RX_EVENTID:
9608 		ath12k_mgmt_rx_event(ab, skb);
9609 		/* mgmt_rx_event() owns the skb now! */
9610 		return;
9611 	case WMI_MGMT_TX_COMPLETION_EVENTID:
9612 		ath12k_mgmt_tx_compl_event(ab, skb);
9613 		break;
9614 	case WMI_SCAN_EVENTID:
9615 		ath12k_scan_event(ab, skb);
9616 		break;
9617 	case WMI_PEER_STA_KICKOUT_EVENTID:
9618 		ath12k_peer_sta_kickout_event(ab, skb);
9619 		break;
9620 	case WMI_ROAM_EVENTID:
9621 		ath12k_roam_event(ab, skb);
9622 		break;
9623 	case WMI_CHAN_INFO_EVENTID:
9624 		ath12k_chan_info_event(ab, skb);
9625 		break;
9626 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
9627 		ath12k_pdev_bss_chan_info_event(ab, skb);
9628 		break;
9629 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
9630 		ath12k_vdev_install_key_compl_event(ab, skb);
9631 		break;
9632 	case WMI_SERVICE_AVAILABLE_EVENTID:
9633 		ath12k_service_available_event(ab, skb);
9634 		break;
9635 	case WMI_PEER_ASSOC_CONF_EVENTID:
9636 		ath12k_peer_assoc_conf_event(ab, skb);
9637 		break;
9638 	case WMI_UPDATE_STATS_EVENTID:
9639 		ath12k_update_stats_event(ab, skb);
9640 		break;
9641 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
9642 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
9643 		break;
9644 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
9645 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
9646 		break;
9647 	case WMI_PDEV_TEMPERATURE_EVENTID:
9648 		ath12k_wmi_pdev_temperature_event(ab, skb);
9649 		break;
9650 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
9651 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
9652 		break;
9653 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
9654 		ath12k_fils_discovery_event(ab, skb);
9655 		break;
9656 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
9657 		ath12k_probe_resp_tx_status_event(ab, skb);
9658 		break;
9659 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
9660 		ath12k_rfkill_state_change_event(ab, skb);
9661 		break;
9662 	case WMI_TWT_ENABLE_EVENTID:
9663 		ath12k_wmi_twt_enable_event(ab, skb);
9664 		break;
9665 	case WMI_TWT_DISABLE_EVENTID:
9666 		ath12k_wmi_twt_disable_event(ab, skb);
9667 		break;
9668 	case WMI_P2P_NOA_EVENTID:
9669 		ath12k_wmi_p2p_noa_event(ab, skb);
9670 		break;
9671 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
9672 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
9673 		break;
9674 	case WMI_VDEV_DELETE_RESP_EVENTID:
9675 		ath12k_vdev_delete_resp_event(ab, skb);
9676 		break;
9677 	case WMI_DIAG_EVENTID:
9678 		ath12k_wmi_diag_event(ab, skb);
9679 		break;
9680 	case WMI_WOW_WAKEUP_HOST_EVENTID:
9681 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
9682 		break;
9683 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
9684 		ath12k_wmi_gtk_offload_status_event(ab, skb);
9685 		break;
9686 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
9687 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
9688 		break;
9689 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
9690 		ath12k_wmi_event_teardown_complete(ab, skb);
9691 		break;
9692 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
9693 		ath12k_wmi_process_tpc_stats(ab, skb);
9694 		break;
9695 	case WMI_11D_NEW_COUNTRY_EVENTID:
9696 		ath12k_reg_11d_new_cc_event(ab, skb);
9697 		break;
9698 	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
9699 		ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
9700 		break;
9701 	/* add Unsupported events (rare) here */
9702 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
9703 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
9704 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
9705 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9706 			   "ignoring unsupported event 0x%x\n", id);
9707 		break;
9708 	/* add Unsupported events (frequent) here */
9709 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
9710 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
9711 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
9712 		/* debug might flood hence silently ignore (no-op) */
9713 		break;
9714 	case WMI_PDEV_UTF_EVENTID:
9715 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
9716 			ath12k_tm_wmi_event_segmented(ab, id, skb);
9717 		else
9718 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
9719 		break;
9720 	default:
9721 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
9722 		break;
9723 	}
9724 
9725 out:
9726 	dev_kfree_skb(skb);
9727 }
9728 
9729 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
9730 					   u32 pdev_idx)
9731 {
9732 	int status;
9733 	static const u32 svc_id[] = {
9734 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
9735 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
9736 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
9737 	};
9738 	struct ath12k_htc_svc_conn_req conn_req = {};
9739 	struct ath12k_htc_svc_conn_resp conn_resp = {};
9740 
9741 	/* these fields are the same for all service endpoints */
9742 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
9743 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
9744 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
9745 
9746 	/* connect to control service */
9747 	conn_req.service_id = svc_id[pdev_idx];
9748 
9749 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
9750 	if (status) {
9751 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
9752 			    status);
9753 		return status;
9754 	}
9755 
9756 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
9757 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
9758 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
9759 
9760 	return 0;
9761 }
9762 
9763 static int
9764 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
9765 			      struct wmi_unit_test_cmd ut_cmd,
9766 			      u32 *test_args)
9767 {
9768 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9769 	struct wmi_unit_test_cmd *cmd;
9770 	struct sk_buff *skb;
9771 	struct wmi_tlv *tlv;
9772 	void *ptr;
9773 	u32 *ut_cmd_args;
9774 	int buf_len, arg_len;
9775 	int ret;
9776 	int i;
9777 
9778 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
9779 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
9780 
9781 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9782 	if (!skb)
9783 		return -ENOMEM;
9784 
9785 	cmd = (struct wmi_unit_test_cmd *)skb->data;
9786 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
9787 						 sizeof(ut_cmd));
9788 
9789 	cmd->vdev_id = ut_cmd.vdev_id;
9790 	cmd->module_id = ut_cmd.module_id;
9791 	cmd->num_args = ut_cmd.num_args;
9792 	cmd->diag_token = ut_cmd.diag_token;
9793 
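	/* The fixed-param TLV is followed by a WMI_TAG_ARRAY_UINT32 TLV
	 * carrying num_args test arguments.
	 */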
9794 	ptr = skb->data + sizeof(ut_cmd);
9795 
9796 	tlv = ptr;
9797 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9798 
9799 	ptr += TLV_HDR_SIZE;
9800 
9801 	ut_cmd_args = ptr;
9802 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
9803 		ut_cmd_args[i] = test_args[i];
9804 
9805 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9806 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
9807 		   cmd->module_id, cmd->vdev_id, cmd->num_args,
9808 		   cmd->diag_token);
9809 
9810 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
9811 
9812 	if (ret) {
9813 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
9814 			    ret);
9815 		dev_kfree_skb(skb);
9816 	}
9817 
9818 	return ret;
9819 }
9820 
9821 int ath12k_wmi_simulate_radar(struct ath12k *ar)
9822 {
9823 	struct ath12k_link_vif *arvif;
9824 	u32 dfs_args[DFS_MAX_TEST_ARGS];
9825 	struct wmi_unit_test_cmd wmi_ut;
9826 	bool arvif_found = false;
9827 
9828 	list_for_each_entry(arvif, &ar->arvifs, list) {
9829 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
9830 			arvif_found = true;
9831 			break;
9832 		}
9833 	}
9834 
9835 	if (!arvif_found)
9836 		return -EINVAL;
9837 
9838 	dfs_args[DFS_TEST_CMDID] = 0;
9839 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
	/* Currently we can pass segment_id (b0 - b1), chirp (b2) and
	 * freq offset (b3 - b10) to the unit test. For simulation
	 * purposes these can be set to 0, which is valid.
	 */
9844 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
9845 
9846 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
9847 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
9848 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
9849 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
9850 
9851 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
9852 
9853 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
9854 }
9855 
9856 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
9857 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
9858 {
9859 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
9860 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9861 	struct sk_buff *skb;
9862 	struct wmi_tlv *tlv;
9863 	__le32 *pdev_id;
9864 	u32 buf_len;
9865 	void *ptr;
9866 	int ret;
9867 
9868 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
9869 
9870 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9871 	if (!skb)
9872 		return -ENOMEM;
9873 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
9874 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
9875 						 sizeof(*cmd));
9876 
9877 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
9878 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
9879 	cmd->subid = cpu_to_le32(tpc_stats_type);
9880 
9881 	ptr = skb->data + sizeof(*cmd);
9882 
9883 	/* The below TLV arrays optionally follow this fixed param TLV structure
9884 	 * 1. ARRAY_UINT32 pdev_ids[]
9885 	 *      If this array is present and non-zero length, stats should only
9886 	 *      be provided from the pdevs identified in the array.
	 * 2. ARRAY_UINT32 vdev_ids[]
9888 	 *      If this array is present and non-zero length, stats should only
9889 	 *      be provided from the vdevs identified in the array.
9890 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
9891 	 *      If this array is present and non-zero length, stats should only
9892 	 *      be provided from the peers with the MAC addresses specified
9893 	 *      in the array
9894 	 */
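	/* Only a single pdev id is sent here; the vdev and peer arrays are
	 * left empty.
	 */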
9895 	tlv = ptr;
9896 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9897 	ptr += TLV_HDR_SIZE;
9898 
9899 	pdev_id = ptr;
9900 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
9901 	ptr += sizeof(*pdev_id);
9902 
9903 	tlv = ptr;
9904 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9905 	ptr += TLV_HDR_SIZE;
9906 
9907 	tlv = ptr;
9908 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
9909 	ptr += TLV_HDR_SIZE;
9910 
9911 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
9912 	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
9915 		dev_kfree_skb(skb);
9916 		return ret;
9917 	}
9918 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
9919 		   ar->pdev->pdev_id);
9920 
9921 	return ret;
9922 }
9923 
9924 int ath12k_wmi_connect(struct ath12k_base *ab)
9925 {
9926 	u32 i;
9927 	u8 wmi_ep_count;
9928 
9929 	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > ab->hw_params->max_radios)
		return -EINVAL;
9932 
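	/* Connect one WMI control service endpoint per radio */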
9933 	for (i = 0; i < wmi_ep_count; i++)
9934 		ath12k_connect_pdev_htc_service(ab, i);
9935 
9936 	return 0;
9937 }
9938 
9939 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
9940 {
9941 	if (WARN_ON(pdev_id >= MAX_RADIOS))
9942 		return;
9943 
9944 	/* TODO: Deinit any pdev specific wmi resource */
9945 }
9946 
9947 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
9948 			   u8 pdev_id)
9949 {
9950 	struct ath12k_wmi_pdev *wmi_handle;
9951 
9952 	if (pdev_id >= ab->hw_params->max_radios)
9953 		return -EINVAL;
9954 
9955 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
9956 
9957 	wmi_handle->wmi_ab = &ab->wmi_ab;
9958 
9959 	ab->wmi_ab.ab = ab;
9960 	/* TODO: Init remaining resource specific to pdev */
9961 
9962 	return 0;
9963 }
9964 
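/* SoC-level WMI attach: set up pdev 0 and the completions used while
 * waiting for the service ready and unified ready events. The
 * preferred HW mode starts at WMI_HOST_HW_MODE_MAX (no preference)
 * and is refined when the extended service ready event is handled.
 */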
9965 int ath12k_wmi_attach(struct ath12k_base *ab)
9966 {
9967 	int ret;
9968 
9969 	ret = ath12k_wmi_pdev_attach(ab, 0);
9970 	if (ret)
9971 		return ret;
9972 
9973 	ab->wmi_ab.ab = ab;
9974 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9975 
9976 	/* It's overwritten when service_ext_ready is handled */
9977 	if (ab->hw_params->single_pdev_only)
9978 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9979 
9980 	/* TODO: Init remaining wmi soc resources required */
9981 	init_completion(&ab->wmi_ab.service_ready);
9982 	init_completion(&ab->wmi_ab.unified_ready);
9983 
9984 	return 0;
9985 }
9986 
9987 void ath12k_wmi_detach(struct ath12k_base *ab)
9988 {
9989 	int i;
9990 
9991 	/* TODO: Deinit wmi resource specific to SOC as required */
9992 
9993 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
9994 		ath12k_wmi_pdev_detach(ab, i);
9995 
9996 	ath12k_wmi_free_dbring_caps(ab);
9997 }
9998 
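/* Program the per-vdev HW data filter. When disabling, the bitmap is
 * set to all ones so that every previously enabled filter mode is
 * cleared in one command.
 */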
9999 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
10000 {
10001 	struct wmi_hw_data_filter_cmd *cmd;
10002 	struct sk_buff *skb;
10003 	int len;
10004 
10005 	len = sizeof(*cmd);
10006 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10007 
10008 	if (!skb)
10009 		return -ENOMEM;
10010 
10011 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
10012 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
10013 						 sizeof(*cmd));
10014 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10015 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
10016 
10017 	/* When disabling, cover all filter modes so every mode is cleared */
10018 	if (arg->enable)
10019 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
10020 	else
10021 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
10022 
10023 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10024 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
10025 		   arg->enable, arg->hw_filter_bitmap);
10026 
10027 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
10028 }
10029 
10030 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
10031 {
10032 	struct wmi_wow_host_wakeup_cmd *cmd;
10033 	struct sk_buff *skb;
10034 	size_t len;
10035 
10036 	len = sizeof(*cmd);
10037 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10038 	if (!skb)
10039 		return -ENOMEM;
10040 
10041 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
10042 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
10043 						 sizeof(*cmd));
10044 
10045 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
10046 
10047 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
10048 }
10049 
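/* Final step of the suspend sequence: instruct firmware to enter WoW.
 * WOW_IFACE_PAUSE_ENABLED is expected to make firmware pause interface
 * traffic while the host is asleep.
 */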
10050 int ath12k_wmi_wow_enable(struct ath12k *ar)
10051 {
10052 	struct wmi_wow_enable_cmd *cmd;
10053 	struct sk_buff *skb;
10054 	int len;
10055 
10056 	len = sizeof(*cmd);
10057 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10058 	if (!skb)
10059 		return -ENOMEM;
10060 
10061 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
10062 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
10063 						 sizeof(*cmd));
10064 
10065 	cmd->enable = cpu_to_le32(1);
10066 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
10067 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
10068 
10069 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
10070 }
10071 
10072 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
10073 				    enum wmi_wow_wakeup_event event,
10074 				    u32 enable)
10075 {
10076 	struct wmi_wow_add_del_event_cmd *cmd;
10077 	struct sk_buff *skb;
10078 	size_t len;
10079 
10080 	len = sizeof(*cmd);
10081 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10082 	if (!skb)
10083 		return -ENOMEM;
10084 
10085 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
10086 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
10087 						 sizeof(*cmd));
10088 	cmd->vdev_id = cpu_to_le32(vdev_id);
10089 	cmd->is_add = cpu_to_le32(enable);
10090 	cmd->event_bitmap = cpu_to_le32((1 << event));
10091 
10092 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
10093 		   wow_wakeup_event(event), enable, vdev_id);
10094 
10095 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
10096 }
10097 
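/* Install a WoW bitmap pattern on a vdev. One populated bitmap-pattern
 * TLV is followed by empty placeholder arrays (IPv4 sync, IPv6 sync,
 * magic pattern, pattern info timeout) and a one-element ratelimit
 * interval array, matching the length computed below. pattern and mask
 * must both be pattern_len bytes long, and the pattern is matched
 * starting pattern_offset bytes into the frame.
 *
 * Example (hypothetical values): to wake on frames whose first six
 * bytes match a given MAC address, pass pattern_len = ETH_ALEN,
 * pattern_offset = 0 and a mask of six 0xff bytes.
 */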
10098 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
10099 			       const u8 *pattern, const u8 *mask,
10100 			       int pattern_len, int pattern_offset)
10101 {
10102 	struct wmi_wow_add_pattern_cmd *cmd;
10103 	struct wmi_wow_bitmap_pattern_params *bitmap;
10104 	struct wmi_tlv *tlv;
10105 	struct sk_buff *skb;
10106 	void *ptr;
10107 	size_t len;
10108 
10109 	len = sizeof(*cmd) +
10110 	      sizeof(*tlv) +			/* array struct */
10111 	      sizeof(*bitmap) +			/* bitmap */
10112 	      sizeof(*tlv) +			/* empty ipv4 sync */
10113 	      sizeof(*tlv) +			/* empty ipv6 sync */
10114 	      sizeof(*tlv) +			/* empty magic */
10115 	      sizeof(*tlv) +			/* empty info timeout */
10116 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
10117 
10118 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10119 	if (!skb)
10120 		return -ENOMEM;
10121 
10122 	/* cmd */
10123 	ptr = skb->data;
10124 	cmd = ptr;
10125 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
10126 						 sizeof(*cmd));
10127 	cmd->vdev_id = cpu_to_le32(vdev_id);
10128 	cmd->pattern_id = cpu_to_le32(pattern_id);
10129 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10130 
10131 	ptr += sizeof(*cmd);
10132 
10133 	/* bitmap */
10134 	tlv = ptr;
10135 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
10136 
10137 	ptr += sizeof(*tlv);
10138 
10139 	bitmap = ptr;
10140 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
10141 						    sizeof(*bitmap));
10142 	memcpy(bitmap->patternbuf, pattern, pattern_len);
10143 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
10144 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
10145 	bitmap->pattern_len = cpu_to_le32(pattern_len);
10146 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
10147 	bitmap->pattern_id = cpu_to_le32(pattern_id);
10148 
10149 	ptr += sizeof(*bitmap);
10150 
10151 	/* ipv4 sync */
10152 	tlv = ptr;
10153 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10154 
10155 	ptr += sizeof(*tlv);
10156 
10157 	/* ipv6 sync */
10158 	tlv = ptr;
10159 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10160 
10161 	ptr += sizeof(*tlv);
10162 
10163 	/* magic */
10164 	tlv = ptr;
10165 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10166 
10167 	ptr += sizeof(*tlv);
10168 
10169 	/* pattern info timeout */
10170 	tlv = ptr;
10171 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10172 
10173 	ptr += sizeof(*tlv);
10174 
10175 	/* ratelimit interval */
10176 	tlv = ptr;
10177 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
10178 
10179 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
10180 		   vdev_id, pattern_id, pattern_offset, pattern_len);
10181 
10182 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
10183 			bitmap->patternbuf, pattern_len);
10184 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
10185 			bitmap->bitmaskbuf, pattern_len);
10186 
10187 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
10188 }
10189 
10190 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
10191 {
10192 	struct wmi_wow_del_pattern_cmd *cmd;
10193 	struct sk_buff *skb;
10194 	size_t len;
10195 
10196 	len = sizeof(*cmd);
10197 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10198 	if (!skb)
10199 		return -ENOMEM;
10200 
10201 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
10202 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
10203 						 sizeof(*cmd));
10204 	cmd->vdev_id = cpu_to_le32(vdev_id);
10205 	cmd->pattern_id = cpu_to_le32(pattern_id);
10206 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10207 
10208 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
10209 		   vdev_id, pattern_id);
10210 
10211 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
10212 }
10213 
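/* Build (but do not send) a WMI_TAG_NLO_CONFIG_CMD buffer that starts
 * PNO scanning: the fixed command TLV, one nlo_configured_params entry
 * per configured network, and a uint32 channel list taken from the
 * first network. MAC address randomization flags are added on request.
 */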
10214 static struct sk_buff *
10215 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
10216 				   struct wmi_pno_scan_req_arg *pno)
10217 {
10218 	struct nlo_configured_params *nlo_list;
10219 	size_t len, nlo_list_len, channel_list_len;
10220 	struct wmi_wow_nlo_config_cmd *cmd;
10221 	__le32 *channel_list;
10222 	struct wmi_tlv *tlv;
10223 	struct sk_buff *skb;
10224 	void *ptr;
10225 	u32 i;
10226 
10227 	len = sizeof(*cmd) +
10228 	      sizeof(*tlv) +
10229 	      /* TLV placeholder for array of structures
10230 	       * nlo_configured_params(nlo_list)
10231 	       */
10232 	      sizeof(*tlv);
10233 	      /* TLV placeholder for array of uint32 channel_list */
10234 
10235 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
10236 	len += channel_list_len;
10237 
10238 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
10239 	len += nlo_list_len;
10240 
10241 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10242 	if (!skb)
10243 		return ERR_PTR(-ENOMEM);
10244 
10245 	ptr = skb->data;
10246 	cmd = ptr;
10247 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
10248 
10249 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
10250 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
10251 
10252 	/* current FW does not support min-max range for dwell time */
10253 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
10254 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
10255 
10256 	if (pno->do_passive_scan)
10257 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
10258 
10259 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
10260 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
10261 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
10262 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
10263 
10264 	if (pno->enable_pno_scan_randomization) {
10265 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
10266 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
10267 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
10268 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
10269 	}
10270 
10271 	ptr += sizeof(*cmd);
10272 
10273 	/* nlo_configured_params(nlo_list) */
10274 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
10275 	tlv = ptr;
10276 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
10277 
10278 	ptr += sizeof(*tlv);
10279 	nlo_list = ptr;
10280 	for (i = 0; i < pno->uc_networks_count; i++) {
10281 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
10282 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
10283 						     sizeof(*nlo_list));
10284 
10285 		nlo_list[i].ssid.valid = cpu_to_le32(1);
10286 		nlo_list[i].ssid.ssid.ssid_len =
10287 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
10288 		memcpy(nlo_list[i].ssid.ssid.ssid,
10289 		       pno->a_networks[i].ssid.ssid,
10290 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
10291 
10292 		if (pno->a_networks[i].rssi_threshold &&
10293 		    pno->a_networks[i].rssi_threshold > -300) {
10294 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
10295 			nlo_list[i].rssi_cond.rssi =
10296 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
10297 		}
10298 
10299 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
10300 		nlo_list[i].bcast_nw_type.bcast_nw_type =
10301 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
10302 	}
10303 
10304 	ptr += nlo_list_len;
10305 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
10306 	tlv = ptr;
10307 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
10308 	ptr += sizeof(*tlv);
10309 	channel_list = ptr;
10310 
10311 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
10312 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
10313 
10314 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
10315 		   vdev_id);
10316 
10317 	return skb;
10318 }
10319 
10320 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
10321 							 u32 vdev_id)
10322 {
10323 	struct wmi_wow_nlo_config_cmd *cmd;
10324 	struct sk_buff *skb;
10325 	size_t len;
10326 
10327 	len = sizeof(*cmd);
10328 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10329 	if (!skb)
10330 		return ERR_PTR(-ENOMEM);
10331 
10332 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
10333 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
10334 
10335 	cmd->vdev_id = cpu_to_le32(vdev_id);
10336 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
10337 
10338 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10339 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
10340 	return skb;
10341 }
10342 
10343 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
10344 			      struct wmi_pno_scan_req_arg  *pno_scan)
10345 {
10346 	struct sk_buff *skb;
10347 
10348 	if (pno_scan->enable)
10349 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
10350 	else
10351 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
10352 
10353 	if (IS_ERR_OR_NULL(skb))
10354 		return -ENOMEM;
10355 
10356 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
10357 }
10358 
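/* Fill an array of NS offload tuples at *ptr and advance it. The first
 * pass (ext == false) always emits WMI_MAX_NS_OFFLOADS tuples, marking
 * only the first ipv6_count of them valid; a second pass with
 * ext == true emits the tuples beyond WMI_MAX_NS_OFFLOADS as an
 * extended array.
 */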
10359 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
10360 				       struct wmi_arp_ns_offload_arg *offload,
10361 				       void **ptr,
10362 				       bool enable,
10363 				       bool ext)
10364 {
10365 	struct wmi_ns_offload_params *ns;
10366 	struct wmi_tlv *tlv;
10367 	void *buf_ptr = *ptr;
10368 	u32 ns_cnt, ns_ext_tuples;
10369 	int i, max_offloads;
10370 
10371 	ns_cnt = offload->ipv6_count;
10372 
10373 	tlv = buf_ptr;
10374 
10375 	if (ext) {
10376 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
10377 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10378 						 ns_ext_tuples * sizeof(*ns));
10379 		i = WMI_MAX_NS_OFFLOADS;
10380 		max_offloads = offload->ipv6_count;
10381 	} else {
10382 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10383 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
10384 		i = 0;
10385 		max_offloads = WMI_MAX_NS_OFFLOADS;
10386 	}
10387 
10388 	buf_ptr += sizeof(*tlv);
10389 
10390 	for (; i < max_offloads; i++) {
10391 		ns = buf_ptr;
10392 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
10393 							sizeof(*ns));
10394 
10395 		if (enable) {
10396 			if (i < ns_cnt)
10397 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
10398 
10399 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
10400 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
10401 
10402 			if (offload->ipv6_type[i])
10403 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
10404 
10405 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
10406 
10407 			if (!is_zero_ether_addr(ns->target_mac.addr))
10408 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
10409 
10410 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10411 				   "wmi index %d ns_solicited %pI6 target %pI6",
10412 				   i, ns->solicitation_ipaddr,
10413 				   ns->target_ipaddr[0]);
10414 		}
10415 
10416 		buf_ptr += sizeof(*ns);
10417 	}
10418 
10419 	*ptr = buf_ptr;
10420 }
10421 
10422 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
10423 					struct wmi_arp_ns_offload_arg *offload,
10424 					void **ptr,
10425 					bool enable)
10426 {
10427 	struct wmi_arp_offload_params *arp;
10428 	struct wmi_tlv *tlv;
10429 	void *buf_ptr = *ptr;
10430 	int i;
10431 
10432 	/* fill arp tuple */
10433 	tlv = buf_ptr;
10434 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10435 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
10436 	buf_ptr += sizeof(*tlv);
10437 
10438 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
10439 		arp = buf_ptr;
10440 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
10441 							 sizeof(*arp));
10442 
10443 		if (enable && i < offload->ipv4_count) {
10444 			/* Copy the target ip addr and flags */
10445 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
10446 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
10447 
10448 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
10449 				   arp->target_ipaddr);
10450 		}
10451 
10452 		buf_ptr += sizeof(*arp);
10453 	}
10454 
10455 	*ptr = buf_ptr;
10456 }
10457 
10458 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
10459 			      struct ath12k_link_vif *arvif,
10460 			      struct wmi_arp_ns_offload_arg *offload,
10461 			      bool enable)
10462 {
10463 	struct wmi_set_arp_ns_offload_cmd *cmd;
10464 	struct wmi_tlv *tlv;
10465 	struct sk_buff *skb;
10466 	void *buf_ptr;
10467 	size_t len;
10468 	u8 ns_cnt, ns_ext_tuples = 0;
10469 
10470 	ns_cnt = offload->ipv6_count;
10471 
10472 	len = sizeof(*cmd) +
10473 	      sizeof(*tlv) +
10474 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
10475 	      sizeof(*tlv) +
10476 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
10477 
10478 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
10479 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
10480 		len += sizeof(*tlv) +
10481 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
10482 	}
10483 
10484 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10485 	if (!skb)
10486 		return -ENOMEM;
10487 
10488 	buf_ptr = skb->data;
10489 	cmd = buf_ptr;
10490 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
10491 						 sizeof(*cmd));
10492 	cmd->flags = cpu_to_le32(0);
10493 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10494 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
10495 
10496 	buf_ptr += sizeof(*cmd);
10497 
10498 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
10499 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
10500 
10501 	if (ns_ext_tuples)
10502 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
10503 
10504 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
10505 }
10506 
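/* Enable or disable GTK rekey offload for a vdev. On enable, the KCK,
 * KEK and last-seen replay counter are handed to firmware so it can
 * complete group rekeying on its own, typically while the host is
 * suspended in WoW.
 */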
10507 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
10508 				 struct ath12k_link_vif *arvif, bool enable)
10509 {
10510 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
10511 	struct wmi_gtk_rekey_offload_cmd *cmd;
10512 	struct sk_buff *skb;
10513 	__le64 replay_ctr;
10514 	int len;
10515 
10516 	len = sizeof(*cmd);
10517 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10518 	if (!skb)
10519 		return -ENOMEM;
10520 
10521 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10522 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10523 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10524 
10525 	if (enable) {
10526 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
10527 
10528 		/* the lengths in rekey_data and cmd are equal */
10529 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
10530 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
10531 
10532 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
10533 		memcpy(cmd->replay_ctr, &replay_ctr,
10534 		       sizeof(replay_ctr));
10535 	} else {
10536 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
10537 	}
10538 
10539 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
10540 		   arvif->vdev_id, enable);
10541 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10542 }
10543 
10544 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
10545 				 struct ath12k_link_vif *arvif)
10546 {
10547 	struct wmi_gtk_rekey_offload_cmd *cmd;
10548 	struct sk_buff *skb;
10549 	int len;
10550 
10551 	len = sizeof(*cmd);
10552 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10553 	if (!skb)
10554 		return -ENOMEM;
10555 
10556 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10557 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10558 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10559 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
10560 
10561 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
10562 		   arvif->vdev_id);
10563 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10564 }
10565 
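/* Configure firmware-generated STA keepalive frames. The ARP response
 * template TLV always follows the fixed command, but its address
 * fields are filled only for the ARP-based keepalive methods.
 */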
10566 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
10567 			     const struct wmi_sta_keepalive_arg *arg)
10568 {
10569 	struct wmi_sta_keepalive_arp_resp_params *arp;
10570 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10571 	struct wmi_sta_keepalive_cmd *cmd;
10572 	struct sk_buff *skb;
10573 	size_t len;
10574 
10575 	len = sizeof(*cmd) + sizeof(*arp);
10576 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10577 	if (!skb)
10578 		return -ENOMEM;
10579 
10580 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
10581 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
10582 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10583 	cmd->enabled = cpu_to_le32(arg->enabled);
10584 	cmd->interval = cpu_to_le32(arg->interval);
10585 	cmd->method = cpu_to_le32(arg->method);
10586 
10587 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
10588 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
10589 						 sizeof(*arp));
10590 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
10591 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
10592 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
10593 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
10594 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
10595 	}
10596 
10597 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10598 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
10599 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
10600 
10601 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
10602 }
10603 
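/* Send the MLO setup command for this pdev: the MLD group id plus a
 * uint32 array naming the partner hardware links in the group.
 */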
10604 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
10605 {
10606 	struct wmi_mlo_setup_cmd *cmd;
10607 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10608 	u32 *partner_links, num_links;
10609 	int i, ret, buf_len, arg_len;
10610 	struct sk_buff *skb;
10611 	struct wmi_tlv *tlv;
10612 	void *ptr;
10613 
10614 	num_links = mlo_params->num_partner_links;
10615 	arg_len = num_links * sizeof(u32);
10616 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
10617 
10618 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
10619 	if (!skb)
10620 		return -ENOMEM;
10621 
10622 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
10623 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
10624 						 sizeof(*cmd));
10625 	cmd->mld_group_id = mlo_params->group_id;
10626 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10627 	ptr = skb->data + sizeof(*cmd);
10628 
10629 	tlv = ptr;
10630 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
10631 	ptr += TLV_HDR_SIZE;
10632 
10633 	partner_links = ptr;
10634 	for (i = 0; i < num_links; i++)
10635 		partner_links[i] = mlo_params->partner_link_id[i];
10636 
10637 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
10638 	if (ret) {
10639 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
10640 			    ret);
10641 		dev_kfree_skb(skb);
10642 		return ret;
10643 	}
10644 
10645 	return 0;
10646 }
10647 
10648 int ath12k_wmi_mlo_ready(struct ath12k *ar)
10649 {
10650 	struct wmi_mlo_ready_cmd *cmd;
10651 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10652 	struct sk_buff *skb;
10653 	int ret, len;
10654 
10655 	len = sizeof(*cmd);
10656 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10657 	if (!skb)
10658 		return -ENOMEM;
10659 
10660 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
10661 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
10662 						 sizeof(*cmd));
10663 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10664 
10665 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
10666 	if (ret) {
10667 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
10668 			    ret);
10669 		dev_kfree_skb(skb);
10670 		return ret;
10671 	}
10672 
10673 	return 0;
10674 }
10675 
10676 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
10677 {
10678 	struct wmi_mlo_teardown_cmd *cmd;
10679 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10680 	struct sk_buff *skb;
10681 	int ret, len;
10682 
10683 	len = sizeof(*cmd);
10684 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10685 	if (!skb)
10686 		return -ENOMEM;
10687 
10688 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
10689 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
10690 						 sizeof(*cmd));
10691 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10692 	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
10693 
10694 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
10695 	if (ret) {
10696 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
10697 			    ret);
10698 		dev_kfree_skb(skb);
10699 		return ret;
10700 	}
10701 
10702 	return 0;
10703 }
10704 
10705 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
10706 {
10707 	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
10708 			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
10709 }
10710 
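/* Push regulatory TPC limits for a vdev (used for 6 GHz power modes):
 * a fixed command TLV followed by one wmi_vdev_ch_power_params entry
 * per power level, each carrying a channel center frequency and the
 * corresponding maximum TX power.
 */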
10711 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
10712 				       u32 vdev_id,
10713 				       struct ath12k_reg_tpc_power_info *param)
10714 {
10715 	struct wmi_vdev_set_tpc_power_cmd *cmd;
10716 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10717 	struct wmi_vdev_ch_power_params *ch;
10718 	int i, ret, len, array_len;
10719 	struct sk_buff *skb;
10720 	struct wmi_tlv *tlv;
10721 	u8 *ptr;
10722 
10723 	array_len = sizeof(*ch) * param->num_pwr_levels;
10724 	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
10725 
10726 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10727 	if (!skb)
10728 		return -ENOMEM;
10729 
10730 	ptr = skb->data;
10731 
10732 	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
10733 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
10734 						 sizeof(*cmd));
10735 	cmd->vdev_id = cpu_to_le32(vdev_id);
10736 	cmd->psd_power = cpu_to_le32(param->is_psd_power);
10737 	cmd->eirp_power = cpu_to_le32(param->eirp_power);
10738 	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
10739 
10740 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10741 		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
10742 		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
10743 
10744 	ptr += sizeof(*cmd);
10745 	tlv = (struct wmi_tlv *)ptr;
10746 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
10747 
10748 	ptr += TLV_HDR_SIZE;
10749 	ch = (struct wmi_vdev_ch_power_params *)ptr;
10750 
10751 	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
10752 		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
10753 							sizeof(*ch));
10754 		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
10755 		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
10756 
10757 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
10758 			   le32_to_cpu(ch->chan_cfreq), le32_to_cpu(ch->tx_power));
10759 	}
10760 
10761 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
10762 	if (ret) {
10763 		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID: %d\n", ret);
10764 		dev_kfree_skb(skb);
10765 		return ret;
10766 	}
10767 
10768 	return 0;
10769 }
10770 
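/* Encode the disallowed MLO mode combinations into TLV form. Up to
 * four IEEE link ids per combination are packed into a single __le32
 * via the WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_* fields.
 */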
10771 static int
10772 ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
10773 				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
10774 				struct wmi_mlo_link_set_active_arg *arg)
10775 {
10776 	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
10777 	u8 i;
10778 
10779 	if (arg->num_disallow_mode_comb >
10780 	    ARRAY_SIZE(arg->disallow_bmap)) {
10781 		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
10782 			    arg->num_disallow_mode_comb);
10783 		return -EINVAL;
10784 	}
10785 
10786 	dislw_bmap_arg = &arg->disallow_bmap[0];
10787 	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
10788 		dislw_bmap->tlv_header =
10789 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
10790 		dislw_bmap->disallowed_mode_bitmap =
10791 				cpu_to_le32(dislw_bmap_arg->disallowed_mode);
10792 		dislw_bmap->ieee_link_id_comb =
10793 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
10794 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
10795 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
10796 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
10797 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
10798 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
10799 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
10800 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);
10801 
10802 		ath12k_dbg(ab, ATH12K_DBG_WMI,
10803 			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
10804 			   i, dislw_bmap_arg->disallowed_mode,
10805 			   dislw_bmap_arg->ieee_link_id_comb);
10806 		dislw_bmap++;
10807 		dislw_bmap_arg++;
10808 	}
10809 
10810 	return 0;
10811 }
10812 
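/* Send WMI_MLO_LINK_SET_ACTIVE_CMDID. The TLV order is fixed:
 * link-number array, vdev bitmap, inactive vdev bitmap (non-empty only
 * for WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE), two ieee_link_id
 * bitmaps that are currently always empty, and finally the
 * disallowed-mode combination array.
 */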
10813 int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
10814 					    struct wmi_mlo_link_set_active_arg *arg)
10815 {
10816 	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
10817 	struct wmi_mlo_set_active_link_number_params *link_num_param;
10818 	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
10819 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
10820 	struct wmi_mlo_link_set_active_cmd *cmd;
10821 	u32 num_inactive_vdev_bitmap = 0;
10822 	u32 num_disallow_mode_comb = 0;
10823 	struct wmi_tlv *tlv;
10824 	struct sk_buff *skb;
10825 	__le32 *vdev_bitmap;
10826 	void *buf_ptr;
10827 	int i, ret;
10828 	u32 len;
10829 
10830 	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
10831 		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
10832 		return -EINVAL;
10833 	}
10834 
10835 	switch (arg->force_mode) {
10836 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
10837 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
10838 		num_link_num_param = arg->num_link_entry;
10839 		fallthrough;
10840 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
10841 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
10842 	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
10843 		num_vdev_bitmap = arg->num_vdev_bitmap;
10844 		break;
10845 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
10846 		num_vdev_bitmap = arg->num_vdev_bitmap;
10847 		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
10848 		break;
10849 	default:
10850 		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
10851 		return -EINVAL;
10852 	}
10853 
10854 	num_disallow_mode_comb = arg->num_disallow_mode_comb;
10855 	len = sizeof(*cmd) +
10856 	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
10857 	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
10858 	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
10859 	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
10860 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
10861 		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10862 
10863 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
10864 	if (!skb)
10865 		return -ENOMEM;
10866 
10867 	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
10868 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
10869 						 sizeof(*cmd));
10870 	cmd->force_mode = cpu_to_le32(arg->force_mode);
10871 	cmd->reason = cpu_to_le32(arg->reason);
10872 	ath12k_dbg(ab, ATH12K_DBG_WMI,
10873 		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
10874 		   arg->force_mode, arg->reason, num_link_num_param,
10875 		   num_vdev_bitmap, num_inactive_vdev_bitmap,
10876 		   num_disallow_mode_comb);
10877 
10878 	buf_ptr = skb->data + sizeof(*cmd);
10879 	tlv = buf_ptr;
10880 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10881 					 sizeof(*link_num_param) * num_link_num_param);
10882 	buf_ptr += TLV_HDR_SIZE;
10883 
10884 	if (num_link_num_param) {
10885 		cmd->ctrl_flags =
10886 			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
10887 					 CRTL_F_DYNC_FORCE_LINK_NUM);
10888 
10889 		link_num_param = buf_ptr;
10890 		for (i = 0; i < num_link_num_param; i++) {
10891 			link_num_param->tlv_header =
10892 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
10893 			link_num_param->num_of_link =
10894 				cpu_to_le32(arg->link_num[i].num_of_link);
10895 			link_num_param->vdev_type =
10896 				cpu_to_le32(arg->link_num[i].vdev_type);
10897 			link_num_param->vdev_subtype =
10898 				cpu_to_le32(arg->link_num[i].vdev_subtype);
10899 			link_num_param->home_freq =
10900 				cpu_to_le32(arg->link_num[i].home_freq);
10901 			ath12k_dbg(ab, ATH12K_DBG_WMI,
10902 				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
10903 				   i, arg->link_num[i].num_of_link,
10904 				   arg->link_num[i].vdev_type,
10905 				   arg->link_num[i].vdev_subtype,
10906 				   arg->link_num[i].home_freq,
10907 				   __le32_to_cpu(cmd->ctrl_flags));
10908 			link_num_param++;
10909 		}
10910 
10911 		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
10912 	}
10913 
10914 	tlv = buf_ptr;
10915 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10916 					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
10917 	buf_ptr += TLV_HDR_SIZE;
10918 
10919 	if (num_vdev_bitmap) {
10920 		vdev_bitmap = buf_ptr;
10921 		for (i = 0; i < num_vdev_bitmap; i++) {
10922 			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
10923 			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
10924 				   i, arg->vdev_bitmap[i]);
10925 		}
10926 
10927 		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
10928 	}
10929 
10930 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
10931 		tlv = buf_ptr;
10932 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10933 						 sizeof(*vdev_bitmap) *
10934 						 num_inactive_vdev_bitmap);
10935 		buf_ptr += TLV_HDR_SIZE;
10936 
10937 		if (num_inactive_vdev_bitmap) {
10938 			vdev_bitmap = buf_ptr;
10939 			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
10940 				vdev_bitmap[i] =
10941 					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
10942 				ath12k_dbg(ab, ATH12K_DBG_WMI,
10943 					   "entry %d inactive_vdev_id_bitmap 0x%x",
10944 					   i, arg->inactive_vdev_bitmap[i]);
10945 			}
10946 
10947 			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10948 		}
10949 	} else {
10950 		/* add empty vdev bitmap2 tlv */
10951 		tlv = buf_ptr;
10952 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10953 		buf_ptr += TLV_HDR_SIZE;
10954 	}
10955 
10956 	/* add empty ieee_link_id_bitmap tlv */
10957 	tlv = buf_ptr;
10958 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10959 	buf_ptr += TLV_HDR_SIZE;
10960 
10961 	/* add empty ieee_link_id_bitmap2 tlv */
10962 	tlv = buf_ptr;
10963 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10964 	buf_ptr += TLV_HDR_SIZE;
10965 
10966 	tlv = buf_ptr;
10967 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10968 					 sizeof(*disallowed_mode_bmap) *
10969 					 arg->num_disallow_mode_comb);
10970 	buf_ptr += TLV_HDR_SIZE;
10971 
10972 	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
10973 	if (ret)
10974 		goto free_skb;
10975 
10976 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
10977 	if (ret) {
10978 		ath12k_warn(ab,
10979 			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
10980 		goto free_skb;
10981 	}
10982 
10983 	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
10984 
10985 	return ret;
10986 
10987 free_skb:
10988 	dev_kfree_skb(skb);
10989 	return ret;
10990 }
10991