xref: /linux/drivers/net/wireless/ath/ath12k/wmi.c (revision 27ba973caaf85ff3a2a23eca33d6dc9b4fe405e8)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include "core.h"
18 #include "debugfs.h"
19 #include "debug.h"
20 #include "mac.h"
21 #include "hw.h"
22 #include "peer.h"
23 #include "p2p.h"
24 #include "testmode.h"
25 
26 struct ath12k_wmi_svc_ready_parse {
27 	bool wmi_svc_bitmap_done;
28 };
29 
30 struct wmi_tlv_fw_stats_parse {
31 	const struct wmi_stats_event *ev;
32 	struct ath12k_fw_stats *stats;
33 };
34 
35 struct ath12k_wmi_dma_ring_caps_parse {
36 	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
37 	u32 n_dma_ring_caps;
38 };
39 
40 struct ath12k_wmi_service_ext_arg {
41 	u32 default_conc_scan_config_bits;
42 	u32 default_fw_config_bits;
43 	struct ath12k_wmi_ppe_threshold_arg ppet;
44 	u32 he_cap_info;
45 	u32 mpdu_density;
46 	u32 max_bssid_rx_filters;
47 	u32 num_hw_modes;
48 	u32 num_phy;
49 };
50 
51 struct ath12k_wmi_svc_rdy_ext_parse {
52 	struct ath12k_wmi_service_ext_arg arg;
53 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
54 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
55 	u32 n_hw_mode_caps;
56 	u32 tot_phy_id;
57 	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
58 	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
59 	u32 n_mac_phy_caps;
60 	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
61 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
62 	u32 n_ext_hal_reg_caps;
63 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
64 	bool hw_mode_done;
65 	bool mac_phy_done;
66 	bool ext_hal_reg_done;
67 	bool mac_phy_chainmask_combo_done;
68 	bool mac_phy_chainmask_cap_done;
69 	bool oem_dma_ring_cap_done;
70 	bool dma_ring_cap_done;
71 };
72 
73 struct ath12k_wmi_svc_rdy_ext2_arg {
74 	u32 reg_db_version;
75 	u32 hw_min_max_tx_power_2ghz;
76 	u32 hw_min_max_tx_power_5ghz;
77 	u32 chwidth_num_peer_caps;
78 	u32 preamble_puncture_bw;
79 	u32 max_user_per_ppdu_ofdma;
80 	u32 max_user_per_ppdu_mumimo;
81 	u32 target_cap_flags;
82 	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
83 	u32 max_num_linkview_peers;
84 	u32 max_num_msduq_supported_per_tid;
85 	u32 default_num_msduq_supported_per_tid;
86 };
87 
88 struct ath12k_wmi_svc_rdy_ext2_parse {
89 	struct ath12k_wmi_svc_rdy_ext2_arg arg;
90 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
91 	bool dma_ring_cap_done;
92 	bool spectral_bin_scaling_done;
93 	bool mac_phy_caps_ext_done;
94 	bool hal_reg_caps_ext2_done;
95 	bool scan_radio_caps_ext2_done;
96 	bool twt_caps_done;
97 	bool htt_msdu_idx_to_qtype_map_done;
98 	bool dbs_or_sbs_cap_ext_done;
99 };
100 
101 struct ath12k_wmi_rdy_parse {
102 	u32 num_extra_mac_addr;
103 };
104 
105 struct ath12k_wmi_dma_buf_release_arg {
106 	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
107 	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
108 	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
109 	u32 num_buf_entry;
110 	u32 num_meta;
111 	bool buf_entry_done;
112 	bool meta_data_done;
113 };
114 
115 struct ath12k_wmi_tlv_policy {
116 	size_t min_len;
117 };
118 
119 struct wmi_tlv_mgmt_rx_parse {
120 	const struct ath12k_wmi_mgmt_rx_params *fixed;
121 	const u8 *frame_buf;
122 	bool frame_buf_done;
123 };
124 
125 static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
126 	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
127 	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
128 	[WMI_TAG_SERVICE_READY_EVENT] = {
129 		.min_len = sizeof(struct wmi_service_ready_event) },
130 	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
131 		.min_len = sizeof(struct wmi_service_ready_ext_event) },
132 	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
133 		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
134 	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
135 		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
136 	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
137 		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
138 	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
139 		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
140 	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
141 		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
142 	[WMI_TAG_VDEV_STOPPED_EVENT] = {
143 		.min_len = sizeof(struct wmi_vdev_stopped_event) },
144 	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
145 		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
146 	[WMI_TAG_MGMT_RX_HDR] = {
147 		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
148 	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
149 		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
150 	[WMI_TAG_SCAN_EVENT] = {
151 		.min_len = sizeof(struct wmi_scan_event) },
152 	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
153 		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
154 	[WMI_TAG_ROAM_EVENT] = {
155 		.min_len = sizeof(struct wmi_roam_event) },
156 	[WMI_TAG_CHAN_INFO_EVENT] = {
157 		.min_len = sizeof(struct wmi_chan_info_event) },
158 	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
159 		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
160 	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
161 		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
162 	[WMI_TAG_READY_EVENT] = {
163 		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
164 	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
165 		.min_len = sizeof(struct wmi_service_available_event) },
166 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
167 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
168 	[WMI_TAG_RFKILL_EVENT] = {
169 		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
170 	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
171 		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
172 	[WMI_TAG_HOST_SWFDA_EVENT] = {
173 		.min_len = sizeof(struct wmi_fils_discovery_event) },
174 	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
175 		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
176 	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
177 		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
178 	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
179 		.min_len = sizeof(struct wmi_twt_enable_event) },
180 	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
181 		.min_len = sizeof(struct wmi_twt_disable_event) },
182 	[WMI_TAG_P2P_NOA_INFO] = {
183 		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
184 	[WMI_TAG_P2P_NOA_EVENT] = {
185 		.min_len = sizeof(struct wmi_p2p_noa_event) },
186 	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
187 		.min_len = sizeof(struct wmi_11d_new_cc_event) },
188 };
189 
190 __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
191 {
192 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
193 		le32_encode_bits(len, WMI_TLV_LEN);
194 }
195 
196 static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
197 {
198 	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
199 }
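
/* Usage sketch (illustrative, mirroring the callers below): a command
 * struct whose first field is its own TLV header passes its full size
 * here, and ath12k_wmi_tlv_cmd_hdr() records size - TLV_HDR_SIZE as the
 * TLV length, i.e. the length of the payload that follows the header:
 *
 *	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
 *						 sizeof(*cmd));
 */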
200 
201 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
202 			     struct ath12k_wmi_resource_config_arg *config)
203 {
204 	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
205 	config->num_peers = ab->num_radios *
206 		ath12k_core_get_max_peers_per_radio(ab);
207 	config->num_tids = ath12k_core_get_max_num_tids(ab);
208 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
209 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
210 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
211 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
212 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
213 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
214 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
215 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
216 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
217 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
218 
219 	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
220 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
221 	else
222 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
223 
224 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
225 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
226 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
227 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
228 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
229 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
230 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
231 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
232 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
233 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
234 	config->rx_skip_defrag_timeout_dup_detection_check =
235 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
236 	config->vow_config = TARGET_VOW_CONFIG;
237 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
238 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
239 	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
240 	config->rx_batchmode = TARGET_RX_BATCHMODE;
241 	/* Indicates the host supports peer map v3 and unmap v2 */
242 	config->peer_map_unmap_version = 0x32;
243 	config->twt_ap_pdev_count = ab->num_radios;
244 	config->twt_ap_sta_count = 1000;
245 	config->ema_max_vap_cnt = ab->num_radios;
246 	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
247 	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
248 
249 	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
250 		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
251 }
252 
253 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
254 			     struct ath12k_wmi_resource_config_arg *config)
255 {
256 	config->num_vdevs = 4;
257 	config->num_peers = 16;
258 	config->num_tids = 32;
259 
260 	config->num_offload_peers = 3;
261 	config->num_offload_reorder_buffs = 3;
262 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
263 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
264 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
265 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
266 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
267 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
268 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
269 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
270 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
271 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
272 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
273 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
274 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
275 	config->num_mcast_groups = 0;
276 	config->num_mcast_table_elems = 0;
277 	config->mcast2ucast_mode = 0;
278 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
279 	config->num_wds_entries = 0;
280 	config->dma_burst_size = 0;
281 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
282 	config->vow_config = TARGET_VOW_CONFIG;
283 	config->gtk_offload_max_vdev = 2;
284 	config->num_msdu_desc = 0x400;
285 	config->beacon_tx_offload_max_vdev = 2;
286 	config->rx_batchmode = TARGET_RX_BATCHMODE;
287 
288 	config->peer_map_unmap_version = 0x1;
289 	config->use_pdev_id = 1;
290 	config->max_frag_entries = 0xa;
291 	config->num_tdls_vdevs = 0x1;
292 	config->num_tdls_conn_table_entries = 8;
294 	config->num_multicast_filter_entries = 0x20;
295 	config->num_wow_filters = 0x16;
296 	config->num_keep_alive_pattern = 0;
297 }
298 
299 #define PRIMAP(_hw_mode_) \
300 	[_hw_mode_] = _hw_mode_##_PRI
301 
302 static const int ath12k_hw_mode_pri_map[] = {
303 	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
304 	PRIMAP(WMI_HOST_HW_MODE_DBS),
305 	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
306 	PRIMAP(WMI_HOST_HW_MODE_SBS),
307 	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
308 	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
309 	/* keep last */
310 	PRIMAP(WMI_HOST_HW_MODE_MAX),
311 };
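
/* For reference: PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI, so this table maps
 * each hw mode enum value to its matching priority constant.
 */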
312 
313 static int
314 ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
315 		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
316 				const void *ptr, void *data),
317 		    void *data)
318 {
319 	const void *begin = ptr;
320 	const struct wmi_tlv *tlv;
321 	u16 tlv_tag, tlv_len;
322 	int ret;
323 
324 	while (len > 0) {
325 		if (len < sizeof(*tlv)) {
326 			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
327 				   ptr - begin, len, sizeof(*tlv));
328 			return -EINVAL;
329 		}
330 
331 		tlv = ptr;
332 		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
333 		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
334 		ptr += sizeof(*tlv);
335 		len -= sizeof(*tlv);
336 
337 		if (tlv_len > len) {
338 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
339 				   tlv_tag, ptr - begin, len, tlv_len);
340 			return -EINVAL;
341 		}
342 
343 		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
344 		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
345 		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
346 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
347 				   tlv_tag, ptr - begin, tlv_len,
348 				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
349 			return -EINVAL;
350 		}
351 
352 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
353 		if (ret)
354 			return ret;
355 
356 		ptr += tlv_len;
357 		len -= tlv_len;
358 	}
359 
360 	return 0;
361 }
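
/* A WMI TLV stream is a back-to-back sequence of [header][value] records;
 * each 32-bit header packs a tag and the value length, and the iterator
 * above hands every value (with its tag and length) to the callback.
 */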
362 
363 static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
364 				     const void *ptr, void *data)
365 {
366 	const void **tb = data;
367 
368 	if (tag < WMI_TAG_MAX)
369 		tb[tag] = ptr;
370 
371 	return 0;
372 }
373 
374 static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
375 				const void *ptr, size_t len)
376 {
377 	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
378 				   (void *)tb);
379 }
380 
381 static const void **
382 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
383 			   struct sk_buff *skb, gfp_t gfp)
384 {
385 	const void **tb;
386 	int ret;
387 
388 	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
389 	if (!tb)
390 		return ERR_PTR(-ENOMEM);
391 
392 	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
393 	if (ret) {
394 		kfree(tb);
395 		return ERR_PTR(ret);
396 	}
397 
398 	return tb;
399 }
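
/* Typical caller pattern elsewhere in this file (shown for illustration):
 *
 *	const void **tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *	const struct wmi_scan_event *ev;
 *
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *	ev = tb[WMI_TAG_SCAN_EVENT];
 *	...
 *	kfree(tb);
 */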
400 
401 static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
402 				      u32 cmd_id)
403 {
404 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
405 	struct ath12k_base *ab = wmi->wmi_ab->ab;
406 	struct wmi_cmd_hdr *cmd_hdr;
407 	int ret;
408 
409 	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
410 		return -ENOMEM;
411 
412 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
413 	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
414 
415 	memset(skb_cb, 0, sizeof(*skb_cb));
416 	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
418 	if (ret)
419 		goto err_pull;
420 
421 	return 0;
422 
423 err_pull:
424 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
425 	return ret;
426 }
427 
428 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
429 			u32 cmd_id)
430 {
431 	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
432 	int ret = -EOPNOTSUPP;
433 
434 	might_sleep();
435 
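	/* Wait for HTC tx credits: the condition retries
	 * ath12k_wmi_cmd_send_nowait() on every wait-queue wakeup and the
	 * wait completes once the result is anything but -EAGAIN, or when
	 * WMI_SEND_TIMEOUT_HZ expires.
	 */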
436 	wait_event_timeout(wmi_ab->tx_credits_wq, ({
437 		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
438 
439 		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
440 			ret = -ESHUTDOWN;
441 
442 		(ret != -EAGAIN);
443 	}), WMI_SEND_TIMEOUT_HZ);
444 
445 	if (ret == -EAGAIN)
446 		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);
447 
448 	return ret;
449 }
450 
451 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
452 				     const void *ptr,
453 				     struct ath12k_wmi_service_ext_arg *arg)
454 {
455 	const struct wmi_service_ready_ext_event *ev = ptr;
456 	int i;
457 
458 	if (!ev)
459 		return -EINVAL;
460 
461 	/* Move this to a host-based bitmap */
462 	arg->default_conc_scan_config_bits =
463 		le32_to_cpu(ev->default_conc_scan_config_bits);
464 	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
465 	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
466 	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
467 	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
468 	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
469 	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
470 
471 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
472 		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
473 			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
474 
475 	return 0;
476 }
477 
478 static int
479 ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
480 				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
481 				      u8 hw_mode_id, u8 phy_id,
482 				      struct ath12k_pdev *pdev)
483 {
484 	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
485 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
486 	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
487 	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
488 	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
489 	struct ath12k_band_cap *cap_band;
490 	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
491 	struct ath12k_fw_pdev *fw_pdev;
492 	u32 phy_map;
493 	u32 hw_idx, phy_idx = 0;
494 	int i;
495 
496 	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
497 		return -EINVAL;
498 
499 	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
500 		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
501 			break;
502 
503 		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
504 		phy_idx = fls(phy_map);
505 	}
506 
507 	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
508 		return -EINVAL;
509 
510 	phy_idx += phy_id;
511 	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
512 		return -EINVAL;
513 
514 	mac_caps = wmi_mac_phy_caps + phy_idx;
515 
516 	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
517 	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
518 	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
519 	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
520 
521 	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
522 	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
523 	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
524 	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
525 	ab->fw_pdev_count++;
526 
527 	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
528 	 * band to band for a single radio, need to see how this should be
529 	 * handled.
530 	 */
531 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
532 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
533 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
534 	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
535 		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
536 		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
537 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
538 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
539 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
540 		pdev_cap->nss_ratio_enabled =
541 			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
542 		pdev_cap->nss_ratio_info =
543 			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
544 	} else {
545 		return -EINVAL;
546 	}
547 
548 	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
549 	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
550 	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
551 	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
552 	 * will be advertised for second mac or vice-versa. Compute the shift value
553 	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
554 	 * mac80211.
555 	 */
556 	pdev_cap->tx_chain_mask_shift =
557 			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
558 	pdev_cap->rx_chain_mask_shift =
559 			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
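
	/* For example, a second 4x4 mac that advertises chainmask 0xf0 gets
	 * a shift of 4 here, so its rates are reported to mac80211 relative
	 * to chain 4 rather than chain 0.
	 */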
560 
561 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
562 		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
563 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
564 		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
565 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
566 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
567 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
568 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
569 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
570 			cap_band->he_cap_phy_info[i] =
571 				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
572 
573 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
574 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
575 
576 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
577 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
578 				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
579 	}
580 
581 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
582 		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
583 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
584 		cap_band->max_bw_supported =
585 			le32_to_cpu(mac_caps->max_bw_supported_5g);
586 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
587 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
588 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
589 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
590 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
591 			cap_band->he_cap_phy_info[i] =
592 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
593 
594 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
595 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
596 
597 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
598 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
599 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
600 
601 		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
602 		cap_band->max_bw_supported =
603 			le32_to_cpu(mac_caps->max_bw_supported_5g);
604 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
605 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
606 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
607 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
608 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
609 			cap_band->he_cap_phy_info[i] =
610 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
611 
612 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
613 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
614 
615 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
616 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
617 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
618 	}
619 
620 	return 0;
621 }
622 
623 static int
624 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
625 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
626 				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
627 				u8 phy_idx,
628 				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
629 {
630 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
631 
632 	if (!reg_caps || !ext_caps)
633 		return -EINVAL;
634 
635 	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
636 		return -EINVAL;
637 
638 	ext_reg_cap = &ext_caps[phy_idx];
639 
640 	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
641 	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
642 	param->eeprom_reg_domain_ext =
643 		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
644 	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
645 	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
646 	/* check if param->wireless_mode is needed */
647 	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
648 	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
649 	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
650 	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
651 
652 	return 0;
653 }
654 
655 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
656 					 const void *evt_buf,
657 					 struct ath12k_wmi_target_cap_arg *cap)
658 {
659 	const struct wmi_service_ready_event *ev = evt_buf;
660 
661 	if (!ev) {
662 		ath12k_err(ab, "%s: failed due to NULL param\n",
663 			   __func__);
664 		return -EINVAL;
665 	}
666 
667 	cap->phy_capability = le32_to_cpu(ev->phy_capability);
668 	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
669 	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
670 	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
671 	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
672 	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
673 	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
674 	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
675 	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
676 	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
677 	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
678 	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
679 	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
680 	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
681 	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
682 	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
683 	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
684 
685 	return 0;
686 }
687 
688 /* Save the wmi_service_bitmap into a linear bitmap. The WMI services in
689  * the WMI service ready event are advertised in b0-b3 (the 4 LSBs) of
690  * each 4-byte word.
691  */
692 static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
693 					   const u32 *wmi_svc_bm)
694 {
695 	int i, j;
696 
697 	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
698 		do {
699 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
700 				set_bit(j, wmi->wmi_ab->svc_map);
701 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
702 	}
703 }
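
/* Illustrative mapping, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4 (per the
 * b0-b3 note above): service bit j lives in bit (j % 4) of 32-bit word
 * j / 4, so e.g. service 5 is b1 of wmi_svc_bm[1].
 */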
704 
705 static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
706 				    const void *ptr, void *data)
707 {
708 	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
709 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
710 	u16 expect_len;
711 
712 	switch (tag) {
713 	case WMI_TAG_SERVICE_READY_EVENT:
714 		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
715 			return -EINVAL;
716 		break;
717 
718 	case WMI_TAG_ARRAY_UINT32:
719 		if (!svc_ready->wmi_svc_bitmap_done) {
720 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
721 			if (len < expect_len) {
722 				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
723 					    len, tag);
724 				return -EINVAL;
725 			}
726 
727 			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
728 
729 			svc_ready->wmi_svc_bitmap_done = true;
730 		}
731 		break;
732 	default:
733 		break;
734 	}
735 
736 	return 0;
737 }
738 
739 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
740 {
741 	struct ath12k_wmi_svc_ready_parse svc_ready = { };
742 	int ret;
743 
744 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
745 				  ath12k_wmi_svc_rdy_parse,
746 				  &svc_ready);
747 	if (ret) {
748 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
749 		return ret;
750 	}
751 
752 	return 0;
753 }
754 
755 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
756 				    struct ieee80211_tx_info *info)
757 {
758 	struct ath12k_base *ab = ar->ab;
759 	u32 freq = 0;
760 
761 	if (ab->hw_params->single_pdev_only &&
762 	    ar->scan.is_roc &&
763 	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
764 		freq = ar->scan.roc_freq;
765 
766 	return freq;
767 }
768 
769 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
770 {
771 	struct sk_buff *skb;
772 	struct ath12k_base *ab = wmi_ab->ab;
773 	u32 round_len = roundup(len, 4);
774 
775 	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
776 	if (!skb)
777 		return NULL;
778 
779 	skb_reserve(skb, WMI_SKB_HEADROOM);
780 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
781 		ath12k_warn(ab, "unaligned WMI skb data\n");
782 
783 	skb_put(skb, round_len);
784 	memset(skb->data, 0, round_len);
785 
786 	return skb;
787 }
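
/* Note: the length is rounded up to a 4-byte multiple because WMI TLVs
 * are 32-bit aligned, and WMI_SKB_HEADROOM leaves space for the WMI
 * command and HTC headers pushed before the buffer is sent.
 */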
788 
789 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
790 			 struct sk_buff *frame)
791 {
792 	struct ath12k_wmi_pdev *wmi = ar->wmi;
793 	struct wmi_mgmt_send_cmd *cmd;
794 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
795 	struct wmi_tlv *frame_tlv;
796 	struct sk_buff *skb;
797 	u32 buf_len;
798 	int ret, len;
799 
800 	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
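	/* Only the first buf_len bytes are copied inline into the TLV below;
	 * firmware is expected to fetch the full frame_len bytes through the
	 * DMA address carried in paddr_lo/paddr_hi.
	 */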
801 
802 	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
803 
804 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
805 	if (!skb)
806 		return -ENOMEM;
807 
808 	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
809 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
810 						 sizeof(*cmd));
811 	cmd->vdev_id = cpu_to_le32(vdev_id);
812 	cmd->desc_id = cpu_to_le32(buf_id);
813 	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
814 	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
815 	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
816 	cmd->frame_len = cpu_to_le32(frame->len);
817 	cmd->buf_len = cpu_to_le32(buf_len);
818 	cmd->tx_params_valid = 0;
819 
820 	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
821 	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
822 
823 	memcpy(frame_tlv->value, frame->data, buf_len);
824 
825 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
826 	if (ret) {
827 		ath12k_warn(ar->ab,
828 			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
829 		dev_kfree_skb(skb);
830 	}
831 
832 	return ret;
833 }
834 
835 int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
836 				      u32 vdev_id, u32 pdev_id)
837 {
838 	struct ath12k_wmi_pdev *wmi = ar->wmi;
839 	struct wmi_request_stats_cmd *cmd;
840 	struct sk_buff *skb;
841 	int ret;
842 
843 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
844 	if (!skb)
845 		return -ENOMEM;
846 
847 	cmd = (struct wmi_request_stats_cmd *)skb->data;
848 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
849 						 sizeof(*cmd));
850 
851 	cmd->stats_id = cpu_to_le32(stats_id);
852 	cmd->vdev_id = cpu_to_le32(vdev_id);
853 	cmd->pdev_id = cpu_to_le32(pdev_id);
854 
855 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
856 	if (ret) {
857 		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
858 		dev_kfree_skb(skb);
859 	}
860 
861 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
862 		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
863 		   stats_id, vdev_id, pdev_id);
864 
865 	return ret;
866 }
867 
868 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
869 			   struct ath12k_wmi_vdev_create_arg *args)
870 {
871 	struct ath12k_wmi_pdev *wmi = ar->wmi;
872 	struct wmi_vdev_create_cmd *cmd;
873 	struct sk_buff *skb;
874 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
875 	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
876 	struct wmi_vdev_create_mlo_params *ml_params;
877 	struct wmi_tlv *tlv;
878 	int ret, len;
879 	void *ptr;
880 
881 	/* This can be optimized by sending the tx/rx chain configuration
882 	 * only for the supported bands instead of always sending it for
883 	 * both bands.
884 	 */
885 	len = sizeof(*cmd) + TLV_HDR_SIZE +
886 		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
887 		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
888 
889 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
890 	if (!skb)
891 		return -ENOMEM;
892 
893 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
894 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
895 						 sizeof(*cmd));
896 
897 	cmd->vdev_id = cpu_to_le32(args->if_id);
898 	cmd->vdev_type = cpu_to_le32(args->type);
899 	cmd->vdev_subtype = cpu_to_le32(args->subtype);
900 	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
901 	cmd->pdev_id = cpu_to_le32(args->pdev_id);
902 	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
903 	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
904 	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
905 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
906 
907 	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
908 		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
909 
910 	ptr = skb->data + sizeof(*cmd);
911 	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
912 
913 	tlv = ptr;
914 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
915 
916 	ptr += TLV_HDR_SIZE;
917 	txrx_streams = ptr;
918 	len = sizeof(*txrx_streams);
919 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
920 							  len);
921 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
922 	txrx_streams->supported_tx_streams =
923 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
924 	txrx_streams->supported_rx_streams =
925 				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
926 
927 	txrx_streams++;
928 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
929 							  len);
930 	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
931 	txrx_streams->supported_tx_streams =
932 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
933 	txrx_streams->supported_rx_streams =
934 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
935 
936 	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
937 
938 	if (is_ml_vdev) {
939 		tlv = ptr;
940 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
941 						 sizeof(*ml_params));
942 		ptr += TLV_HDR_SIZE;
943 		ml_params = ptr;
944 
945 		ml_params->tlv_header =
946 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
947 					       sizeof(*ml_params));
948 		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
949 	}
950 
951 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
952 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
953 		   args->if_id, args->type, args->subtype,
954 		   macaddr, args->pdev_id);
955 
956 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
957 	if (ret) {
958 		ath12k_warn(ar->ab,
959 			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
960 		dev_kfree_skb(skb);
961 	}
962 
963 	return ret;
964 }
965 
966 int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
967 {
968 	struct ath12k_wmi_pdev *wmi = ar->wmi;
969 	struct wmi_vdev_delete_cmd *cmd;
970 	struct sk_buff *skb;
971 	int ret;
972 
973 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
974 	if (!skb)
975 		return -ENOMEM;
976 
977 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
978 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
979 						 sizeof(*cmd));
980 	cmd->vdev_id = cpu_to_le32(vdev_id);
981 
982 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
983 
984 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
985 	if (ret) {
986 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
987 		dev_kfree_skb(skb);
988 	}
989 
990 	return ret;
991 }
992 
993 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
994 {
995 	struct ath12k_wmi_pdev *wmi = ar->wmi;
996 	struct wmi_vdev_stop_cmd *cmd;
997 	struct sk_buff *skb;
998 	int ret;
999 
1000 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1001 	if (!skb)
1002 		return -ENOMEM;
1003 
1004 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
1005 
1006 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
1007 						 sizeof(*cmd));
1008 	cmd->vdev_id = cpu_to_le32(vdev_id);
1009 
1010 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
1011 
1012 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
1013 	if (ret) {
1014 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
1015 		dev_kfree_skb(skb);
1016 	}
1017 
1018 	return ret;
1019 }
1020 
1021 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
1022 {
1023 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1024 	struct wmi_vdev_down_cmd *cmd;
1025 	struct sk_buff *skb;
1026 	int ret;
1027 
1028 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1029 	if (!skb)
1030 		return -ENOMEM;
1031 
1032 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
1033 
1034 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
1035 						 sizeof(*cmd));
1036 	cmd->vdev_id = cpu_to_le32(vdev_id);
1037 
1038 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
1039 
1040 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
1041 	if (ret) {
1042 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
1043 		dev_kfree_skb(skb);
1044 	}
1045 
1046 	return ret;
1047 }
1048 
1049 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
1050 				       struct wmi_vdev_start_req_arg *arg)
1051 {
1052 	u32 center_freq1 = arg->band_center_freq1;
1053 
1054 	memset(chan, 0, sizeof(*chan));
1055 
1056 	chan->mhz = cpu_to_le32(arg->freq);
1057 	chan->band_center_freq1 = cpu_to_le32(center_freq1);
1058 	if (arg->mode == MODE_11BE_EHT320) {
1059 		if (arg->freq > center_freq1)
1060 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
1061 		else
1062 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
1063 
1064 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1065 
1066 	} else if (arg->mode == MODE_11BE_EHT160 ||
1067 		   arg->mode == MODE_11AX_HE160) {
1068 		if (arg->freq > center_freq1)
1069 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
1070 		else
1071 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
1072 
1073 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1074 	} else {
1075 		chan->band_center_freq2 = 0;
1076 	}
1077 
1078 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
1079 	if (arg->passive)
1080 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
1081 	if (arg->allow_ibss)
1082 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
1083 	if (arg->allow_ht)
1084 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
1085 	if (arg->allow_vht)
1086 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
1087 	if (arg->allow_he)
1088 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
1089 	if (arg->ht40plus)
1090 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
1091 	if (arg->chan_radar)
1092 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
1093 	if (arg->freq2_radar)
1094 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
1095 
1096 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
1097 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
1098 		le32_encode_bits(arg->max_reg_power,
1099 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
1100 
1101 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
1102 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
1103 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
1104 }
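
/* Worked example with illustrative numbers: for a 320 MHz EHT channel
 * whose primary lies above the 320 MHz center X, the code above reports
 * band_center_freq1 = X + 80 (the center of the primary 160 MHz half) and
 * band_center_freq2 = X; the 160 MHz modes use +/-40 in the same way.
 */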
1105 
1106 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
1107 			  bool restart)
1108 {
1109 	struct wmi_vdev_start_mlo_params *ml_params;
1110 	struct wmi_partner_link_info *partner_info;
1111 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1112 	struct wmi_vdev_start_request_cmd *cmd;
1113 	struct sk_buff *skb;
1114 	struct ath12k_wmi_channel_params *chan;
1115 	struct wmi_tlv *tlv;
1116 	void *ptr;
1117 	int ret, len, i, ml_arg_size = 0;
1118 
1119 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1120 		return -EINVAL;
1121 
1122 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
1123 
1124 	if (!restart && arg->ml.enabled) {
1125 		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
1126 			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
1127 					      sizeof(*partner_info));
1128 		len += ml_arg_size;
1129 	}
1130 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1131 	if (!skb)
1132 		return -ENOMEM;
1133 
1134 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1135 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
1136 						 sizeof(*cmd));
1137 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1138 	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
1139 	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
1140 	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
1141 	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
1142 	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
1143 	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
1144 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
1145 	cmd->regdomain = cpu_to_le32(arg->regdomain);
1146 	cmd->he_ops = cpu_to_le32(arg->he_ops);
1147 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
1148 	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
1149 	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
1150 
1151 	if (!restart) {
1152 		if (arg->ssid) {
1153 			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
1154 			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1155 		}
1156 		if (arg->hidden_ssid)
1157 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
1158 		if (arg->pmf_enabled)
1159 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
1160 	}
1161 
1162 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
1163 
1164 	ptr = skb->data + sizeof(*cmd);
1165 	chan = ptr;
1166 
1167 	ath12k_wmi_put_wmi_channel(chan, arg);
1168 
1169 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
1170 						  sizeof(*chan));
1171 	ptr += sizeof(*chan);
1172 
1173 	tlv = ptr;
1174 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
1175 
1176 	/* Note: This is a nested TLV containing:
1177 	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
1178 	 */
1179 
1180 	ptr += sizeof(*tlv);
1181 
1182 	if (ml_arg_size) {
1183 		tlv = ptr;
1184 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1185 						 sizeof(*ml_params));
1186 		ptr += TLV_HDR_SIZE;
1187 
1188 		ml_params = ptr;
1189 
1190 		ml_params->tlv_header =
1191 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
1192 					       sizeof(*ml_params));
1193 
1194 		ml_params->flags = le32_encode_bits(arg->ml.enabled,
1195 						    ATH12K_WMI_FLAG_MLO_ENABLED) |
1196 				   le32_encode_bits(arg->ml.assoc_link,
1197 						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
1198 				   le32_encode_bits(arg->ml.mcast_link,
1199 						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
1200 				   le32_encode_bits(arg->ml.link_add,
1201 						    ATH12K_WMI_FLAG_MLO_LINK_ADD);
1202 
1203 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
1204 			   arg->vdev_id, ml_params->flags);
1205 
1206 		ptr += sizeof(*ml_params);
1207 
1208 		tlv = ptr;
1209 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1210 						 arg->ml.num_partner_links *
1211 						 sizeof(*partner_info));
1212 		ptr += TLV_HDR_SIZE;
1213 
1214 		partner_info = ptr;
1215 
1216 		for (i = 0; i < arg->ml.num_partner_links; i++) {
1217 			partner_info->tlv_header =
1218 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
1219 						       sizeof(*partner_info));
1220 			partner_info->vdev_id =
1221 				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
1222 			partner_info->hw_link_id =
1223 				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
1224 			ether_addr_copy(partner_info->vdev_addr.addr,
1225 					arg->ml.partner_info[i].addr);
1226 
1227 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
1228 				   partner_info->vdev_id, partner_info->hw_link_id,
1229 				   partner_info->vdev_addr.addr);
1230 
1231 			partner_info++;
1232 		}
1233 
1234 		ptr = partner_info;
1235 	}
1236 
1237 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
1238 		   restart ? "restart" : "start", arg->vdev_id,
1239 		   arg->freq, arg->mode);
1240 
1241 	if (restart)
1242 		ret = ath12k_wmi_cmd_send(wmi, skb,
1243 					  WMI_VDEV_RESTART_REQUEST_CMDID);
1244 	else
1245 		ret = ath12k_wmi_cmd_send(wmi, skb,
1246 					  WMI_VDEV_START_REQUEST_CMDID);
1247 	if (ret) {
1248 		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
1249 			    restart ? "restart" : "start");
1250 		dev_kfree_skb(skb);
1251 	}
1252 
1253 	return ret;
1254 }
1255 
1256 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
1257 {
1258 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1259 	struct wmi_vdev_up_cmd *cmd;
1260 	struct sk_buff *skb;
1261 	int ret;
1262 
1263 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1264 	if (!skb)
1265 		return -ENOMEM;
1266 
1267 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1268 
1269 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1270 						 sizeof(*cmd));
1271 	cmd->vdev_id = cpu_to_le32(params->vdev_id);
1272 	cmd->vdev_assoc_id = cpu_to_le32(params->aid);
1273 
1274 	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
1275 
1276 	if (params->tx_bssid) {
1277 		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
1278 		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
1279 		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
1280 	}
1281 
1282 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1283 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1284 		   params->vdev_id, params->aid, params->bssid);
1285 
1286 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1287 	if (ret) {
1288 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1289 		dev_kfree_skb(skb);
1290 	}
1291 
1292 	return ret;
1293 }
1294 
1295 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
1296 				    struct ath12k_wmi_peer_create_arg *arg)
1297 {
1298 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1299 	struct wmi_peer_create_cmd *cmd;
1300 	struct sk_buff *skb;
1301 	int ret, len;
1302 	struct wmi_peer_create_mlo_params *ml_param;
1303 	void *ptr;
1304 	struct wmi_tlv *tlv;
1305 
1306 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
1307 
1308 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1309 	if (!skb)
1310 		return -ENOMEM;
1311 
1312 	cmd = (struct wmi_peer_create_cmd *)skb->data;
1313 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
1314 						 sizeof(*cmd));
1315 
1316 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
1317 	cmd->peer_type = cpu_to_le32(arg->peer_type);
1318 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1319 
1320 	ptr = skb->data + sizeof(*cmd);
1321 	tlv = ptr;
1322 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
1323 					 sizeof(*ml_param));
1324 	ptr += TLV_HDR_SIZE;
1325 	ml_param = ptr;
1326 	ml_param->tlv_header =
1327 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
1328 					       sizeof(*ml_param));
1329 	if (arg->ml_enabled)
1330 		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
1331 
1332 	ptr += sizeof(*ml_param);
1333 
1334 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1335 		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
1336 		   arg->vdev_id, arg->peer_addr, ml_param->flags);
1337 
1338 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1339 	if (ret) {
1340 		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1341 		dev_kfree_skb(skb);
1342 	}
1343 
1344 	return ret;
1345 }
1346 
1347 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
1348 				    const u8 *peer_addr, u8 vdev_id)
1349 {
1350 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1351 	struct wmi_peer_delete_cmd *cmd;
1352 	struct sk_buff *skb;
1353 	int ret;
1354 
1355 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1356 	if (!skb)
1357 		return -ENOMEM;
1358 
1359 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
1360 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
1361 						 sizeof(*cmd));
1362 
1363 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1364 	cmd->vdev_id = cpu_to_le32(vdev_id);
1365 
1366 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1367 		   "WMI peer delete vdev_id %d peer_addr %pM\n",
1368 		   vdev_id, peer_addr);
1369 
1370 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
1371 	if (ret) {
1372 		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
1373 		dev_kfree_skb(skb);
1374 	}
1375 
1376 	return ret;
1377 }
1378 
1379 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1380 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1381 {
1382 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1383 	struct wmi_pdev_set_regdomain_cmd *cmd;
1384 	struct sk_buff *skb;
1385 	int ret;
1386 
1387 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1388 	if (!skb)
1389 		return -ENOMEM;
1390 
1391 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1392 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1393 						 sizeof(*cmd));
1394 
1395 	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1396 	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1397 	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1398 	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1399 	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1400 	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1401 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1402 
1403 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1404 		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1405 		   arg->current_rd_in_use, arg->current_rd_2g,
1406 		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1407 
1408 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1409 	if (ret) {
1410 		ath12k_warn(ar->ab,
1411 			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1412 		dev_kfree_skb(skb);
1413 	}
1414 
1415 	return ret;
1416 }
1417 
1418 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
1419 			      u32 vdev_id, u32 param_id, u32 param_val)
1420 {
1421 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1422 	struct wmi_peer_set_param_cmd *cmd;
1423 	struct sk_buff *skb;
1424 	int ret;
1425 
1426 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1427 	if (!skb)
1428 		return -ENOMEM;
1429 
1430 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1431 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
1432 						 sizeof(*cmd));
1433 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1434 	cmd->vdev_id = cpu_to_le32(vdev_id);
1435 	cmd->param_id = cpu_to_le32(param_id);
1436 	cmd->param_value = cpu_to_le32(param_val);
1437 
1438 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1439 		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
1440 		   vdev_id, peer_addr, param_id, param_val);
1441 
1442 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
1443 	if (ret) {
1444 		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
1445 		dev_kfree_skb(skb);
1446 	}
1447 
1448 	return ret;
1449 }
1450 
1451 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
1452 					u8 peer_addr[ETH_ALEN],
1453 					u32 peer_tid_bitmap,
1454 					u8 vdev_id)
1455 {
1456 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1457 	struct wmi_peer_flush_tids_cmd *cmd;
1458 	struct sk_buff *skb;
1459 	int ret;
1460 
1461 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1462 	if (!skb)
1463 		return -ENOMEM;
1464 
1465 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1466 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
1467 						 sizeof(*cmd));
1468 
1469 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1470 	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
1471 	cmd->vdev_id = cpu_to_le32(vdev_id);
1472 
1473 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1474 		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1475 		   vdev_id, peer_addr, peer_tid_bitmap);
1476 
1477 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1478 	if (ret) {
1479 		ath12k_warn(ar->ab,
1480 			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1481 		dev_kfree_skb(skb);
1482 	}
1483 
1484 	return ret;
1485 }
1486 
1487 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
1488 					   int vdev_id, const u8 *addr,
1489 					   dma_addr_t paddr, u8 tid,
1490 					   u8 ba_window_size_valid,
1491 					   u32 ba_window_size)
1492 {
1493 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
1494 	struct sk_buff *skb;
1495 	int ret;
1496 
1497 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
1498 	if (!skb)
1499 		return -ENOMEM;
1500 
1501 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
1502 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
1503 						 sizeof(*cmd));
1504 
1505 	ether_addr_copy(cmd->peer_macaddr.addr, addr);
1506 	cmd->vdev_id = cpu_to_le32(vdev_id);
1507 	cmd->tid = cpu_to_le32(tid);
1508 	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
1509 	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
1510 	cmd->queue_no = cpu_to_le32(tid);
1511 	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
1512 	cmd->ba_window_size = cpu_to_le32(ba_window_size);
1513 
1514 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1515 		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1516 		   addr, vdev_id, tid);
1517 
1518 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
1519 				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
1520 	if (ret) {
1521 		ath12k_warn(ar->ab,
1522 			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1523 		dev_kfree_skb(skb);
1524 	}
1525 
1526 	return ret;
1527 }
1528 
1529 int
1530 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1531 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1532 {
1533 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1534 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1535 	struct sk_buff *skb;
1536 	int ret;
1537 
1538 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1539 	if (!skb)
1540 		return -ENOMEM;
1541 
1542 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1543 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1544 						 sizeof(*cmd));
1545 
1546 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1547 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1548 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1549 
1550 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1551 		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
1552 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1553 
1554 	ret = ath12k_wmi_cmd_send(wmi, skb,
1555 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1556 	if (ret) {
1557 		ath12k_warn(ar->ab,
1558 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1559 		dev_kfree_skb(skb);
1560 	}
1561 
1562 	return ret;
1563 }
1564 
1565 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1566 			      u32 param_value, u8 pdev_id)
1567 {
1568 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1569 	struct wmi_pdev_set_param_cmd *cmd;
1570 	struct sk_buff *skb;
1571 	int ret;
1572 
1573 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1574 	if (!skb)
1575 		return -ENOMEM;
1576 
1577 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1578 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1579 						 sizeof(*cmd));
1580 	cmd->pdev_id = cpu_to_le32(pdev_id);
1581 	cmd->param_id = cpu_to_le32(param_id);
1582 	cmd->param_value = cpu_to_le32(param_value);
1583 
1584 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1585 		   "WMI pdev set param %d pdev id %d value %d\n",
1586 		   param_id, pdev_id, param_value);
1587 
1588 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1589 	if (ret) {
1590 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1591 		dev_kfree_skb(skb);
1592 	}
1593 
1594 	return ret;
1595 }
1596 
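/* Despite the "pdev" name this sends WMI_STA_POWERSAVE_MODE_CMDID and
 * toggles station power save per vdev; 'enable' is expected to carry an
 * enum wmi_sta_ps_mode value (WMI_STA_PS_MODE_DISABLED/_ENABLED).
 */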
1597 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1598 {
1599 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1600 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1601 	struct sk_buff *skb;
1602 	int ret;
1603 
1604 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1605 	if (!skb)
1606 		return -ENOMEM;
1607 
1608 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1609 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1610 						 sizeof(*cmd));
1611 	cmd->vdev_id = cpu_to_le32(vdev_id);
1612 	cmd->sta_ps_mode = cpu_to_le32(enable);
1613 
1614 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1615 		   "WMI vdev set psmode %d vdev id %d\n",
1616 		   enable, vdev_id);
1617 
1618 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1619 	if (ret) {
1620 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1621 		dev_kfree_skb(skb);
1622 	}
1623 
1624 	return ret;
1625 }
1626 
1627 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1628 			    u32 pdev_id)
1629 {
1630 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1631 	struct wmi_pdev_suspend_cmd *cmd;
1632 	struct sk_buff *skb;
1633 	int ret;
1634 
1635 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1636 	if (!skb)
1637 		return -ENOMEM;
1638 
1639 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1640 
1641 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1642 						 sizeof(*cmd));
1643 
1644 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1645 	cmd->pdev_id = cpu_to_le32(pdev_id);
1646 
1647 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1648 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1649 
1650 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1651 	if (ret) {
1652 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1653 		dev_kfree_skb(skb);
1654 	}
1655 
1656 	return ret;
1657 }
1658 
1659 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1660 {
1661 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1662 	struct wmi_pdev_resume_cmd *cmd;
1663 	struct sk_buff *skb;
1664 	int ret;
1665 
1666 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1667 	if (!skb)
1668 		return -ENOMEM;
1669 
1670 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1671 
1672 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1673 						 sizeof(*cmd));
1674 	cmd->pdev_id = cpu_to_le32(pdev_id);
1675 
1676 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1677 		   "WMI pdev resume pdev id %d\n", pdev_id);
1678 
1679 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1680 	if (ret) {
1681 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1682 		dev_kfree_skb(skb);
1683 	}
1684 
1685 	return ret;
1686 }
1687 
1688 /* TODO: FW support for this command is not available yet.
1689  * It can be tested once the command and the corresponding
1690  * event are implemented in FW.
1691  */
1692 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1693 					  enum wmi_bss_chan_info_req_type type)
1694 {
1695 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1696 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1697 	struct sk_buff *skb;
1698 	int ret;
1699 
1700 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1701 	if (!skb)
1702 		return -ENOMEM;
1703 
1704 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1705 
1706 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1707 						 sizeof(*cmd));
1708 	cmd->req_type = cpu_to_le32(type);
1709 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1710 
1711 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1712 		   "WMI bss chan info req type %d\n", type);
1713 
1714 	ret = ath12k_wmi_cmd_send(wmi, skb,
1715 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1716 	if (ret) {
1717 		ath12k_warn(ar->ab,
1718 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1719 		dev_kfree_skb(skb);
1720 	}
1721 
1722 	return ret;
1723 }
1724 
1725 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1726 					struct ath12k_wmi_ap_ps_arg *arg)
1727 {
1728 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1729 	struct wmi_ap_ps_peer_cmd *cmd;
1730 	struct sk_buff *skb;
1731 	int ret;
1732 
1733 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1734 	if (!skb)
1735 		return -ENOMEM;
1736 
1737 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1738 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1739 						 sizeof(*cmd));
1740 
1741 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1742 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1743 	cmd->param = cpu_to_le32(arg->param);
1744 	cmd->value = cpu_to_le32(arg->value);
1745 
1746 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1747 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1748 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1749 
1750 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1751 	if (ret) {
1752 		ath12k_warn(ar->ab,
1753 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1754 		dev_kfree_skb(skb);
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1761 				u32 param, u32 param_value)
1762 {
1763 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1764 	struct wmi_sta_powersave_param_cmd *cmd;
1765 	struct sk_buff *skb;
1766 	int ret;
1767 
1768 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1769 	if (!skb)
1770 		return -ENOMEM;
1771 
1772 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1773 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1774 						 sizeof(*cmd));
1775 
1776 	cmd->vdev_id = cpu_to_le32(vdev_id);
1777 	cmd->param = cpu_to_le32(param);
1778 	cmd->value = cpu_to_le32(param_value);
1779 
1780 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1781 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1782 		   vdev_id, param, param_value);
1783 
1784 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1785 	if (ret) {
1786 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1787 		dev_kfree_skb(skb);
1788 	}
1789 
1790 	return ret;
1791 }
1792 
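/* Debug-only helper: asks the firmware to crash itself after
 * delay_time_ms so that the recovery path can be exercised, e.g. from
 * the simulate_fw_crash debugfs file (assuming
 * ATH12K_WMI_FW_HANG_ASSERT_TYPE as the type argument).
 */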
1793 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1794 {
1795 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1796 	struct wmi_force_fw_hang_cmd *cmd;
1797 	struct sk_buff *skb;
1798 	int ret, len;
1799 
1800 	len = sizeof(*cmd);
1801 
1802 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1803 	if (!skb)
1804 		return -ENOMEM;
1805 
1806 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1807 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1808 						 len);
1809 
1810 	cmd->type = cpu_to_le32(type);
1811 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1812 
1813 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1814 	if (ret) {
1815 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1816 		dev_kfree_skb(skb);
1817 	}
1818 
1819 	return ret;
1820 }
1821 
1822 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1823 				  u32 param_id, u32 param_value)
1824 {
1825 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1826 	struct wmi_vdev_set_param_cmd *cmd;
1827 	struct sk_buff *skb;
1828 	int ret;
1829 
1830 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1831 	if (!skb)
1832 		return -ENOMEM;
1833 
1834 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1835 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1836 						 sizeof(*cmd));
1837 
1838 	cmd->vdev_id = cpu_to_le32(vdev_id);
1839 	cmd->param_id = cpu_to_le32(param_id);
1840 	cmd->param_value = cpu_to_le32(param_value);
1841 
1842 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1843 		   "WMI vdev id 0x%x set param %d value %d\n",
1844 		   vdev_id, param_id, param_value);
1845 
1846 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1847 	if (ret) {
1848 		ath12k_warn(ar->ab,
1849 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1850 		dev_kfree_skb(skb);
1851 	}
1852 
1853 	return ret;
1854 }
1855 
1856 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1857 {
1858 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1859 	struct wmi_get_pdev_temperature_cmd *cmd;
1860 	struct sk_buff *skb;
1861 	int ret;
1862 
1863 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1864 	if (!skb)
1865 		return -ENOMEM;
1866 
1867 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1868 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1869 						 sizeof(*cmd));
1870 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1871 
1872 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1873 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1874 
1875 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1876 	if (ret) {
1877 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1878 		dev_kfree_skb(skb);
1879 	}
1880 
1881 	return ret;
1882 }
1883 
1884 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1885 					    u32 vdev_id, u32 bcn_ctrl_op)
1886 {
1887 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1888 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1889 	struct sk_buff *skb;
1890 	int ret;
1891 
1892 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1893 	if (!skb)
1894 		return -ENOMEM;
1895 
1896 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1897 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1898 						 sizeof(*cmd));
1899 
1900 	cmd->vdev_id = cpu_to_le32(vdev_id);
1901 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1902 
1903 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1904 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1905 		   vdev_id, bcn_ctrl_op);
1906 
1907 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1908 	if (ret) {
1909 		ath12k_warn(ar->ab,
1910 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1911 		dev_kfree_skb(skb);
1912 	}
1913 
1914 	return ret;
1915 }
1916 
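/* Note on the TLV helpers used by the builders below:
 * ath12k_wmi_tlv_cmd_hdr() is meant for fixed command/struct TLVs and
 * encodes len - TLV_HDR_SIZE, while ath12k_wmi_tlv_hdr() is meant for
 * WMI_TAG_ARRAY_* TLVs and encodes the payload length as passed.
 */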
1917 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1918 			     const u8 *p2p_ie)
1919 {
1920 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1921 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1922 	size_t p2p_ie_len, aligned_len;
1923 	struct wmi_tlv *tlv;
1924 	struct sk_buff *skb;
1925 	void *ptr;
1926 	int ret, len;
1927 
1928 	p2p_ie_len = p2p_ie[1] + 2;
1929 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1930 
1931 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1932 
1933 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1934 	if (!skb)
1935 		return -ENOMEM;
1936 
1937 	ptr = skb->data;
1938 	cmd = ptr;
1939 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1940 						 sizeof(*cmd));
1941 	cmd->vdev_id = cpu_to_le32(vdev_id);
1942 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1943 
1944 	ptr += sizeof(*cmd);
1945 	tlv = ptr;
1946 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
1947 					 aligned_len);
1948 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1949 
1950 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1951 	if (ret) {
1952 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1953 		dev_kfree_skb(skb);
1954 	}
1955 
1956 	return ret;
1957 }
1958 
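/* Beacon template message layout: the fixed WMI_TAG_BCN_TMPL_CMD TLV,
 * a WMI_TAG_BCN_PRB_INFO TLV (caps/erp, left zeroed here) and a
 * WMI_TAG_ARRAY_BYTE TLV carrying the beacon padded to a 4-byte
 * boundary. CSA countdown offsets and EMA (multiple BSSID) parameters
 * are filled into the fixed TLV when applicable.
 */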
1959 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1960 			struct ieee80211_mutable_offsets *offs,
1961 			struct sk_buff *bcn,
1962 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1963 {
1964 	struct ath12k *ar = arvif->ar;
1965 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1966 	struct ath12k_base *ab = ar->ab;
1967 	struct wmi_bcn_tmpl_cmd *cmd;
1968 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1969 	struct ath12k_vif *ahvif = arvif->ahvif;
1970 	struct ieee80211_bss_conf *conf;
1971 	u32 vdev_id = arvif->vdev_id;
1972 	struct wmi_tlv *tlv;
1973 	struct sk_buff *skb;
1974 	u32 ema_params = 0;
1975 	void *ptr;
1976 	int ret, len;
1977 	size_t aligned_len = roundup(bcn->len, 4);
1978 
1979 	conf = ath12k_mac_get_link_bss_conf(arvif);
1980 	if (!conf) {
1981 		ath12k_warn(ab,
1982 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1983 			    ahvif->vif->addr, arvif->link_id);
1984 		return -EINVAL;
1985 	}
1986 
1987 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1988 
1989 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1990 	if (!skb)
1991 		return -ENOMEM;
1992 
1993 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1994 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1995 						 sizeof(*cmd));
1996 	cmd->vdev_id = cpu_to_le32(vdev_id);
1997 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1998 
1999 	if (conf->csa_active) {
2000 		cmd->csa_switch_count_offset =
2001 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
2002 		cmd->ext_csa_switch_count_offset =
2003 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
2004 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
2005 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
2006 	}
2007 
2008 	cmd->buf_len = cpu_to_le32(bcn->len);
2009 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
2010 	if (ema_args) {
2011 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
2012 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
2013 		if (ema_args->bcn_index == 0)
2014 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
2015 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
2016 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
2017 		cmd->ema_params = cpu_to_le32(ema_params);
2018 	}
2019 	cmd->feature_enable_bitmap =
2020 		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
2021 					    WMI_BEACON_PROTECTION_EN_BIT));
2022 
2023 	ptr = skb->data + sizeof(*cmd);
2024 
2025 	bcn_prb_info = ptr;
2026 	len = sizeof(*bcn_prb_info);
2027 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
2028 							  len);
2029 	bcn_prb_info->caps = 0;
2030 	bcn_prb_info->erp = 0;
2031 
2032 	ptr += sizeof(*bcn_prb_info);
2033 
2034 	tlv = ptr;
2035 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2036 	memcpy(tlv->value, bcn->data, bcn->len);
2037 
2038 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2039 	if (ret) {
2040 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2041 		dev_kfree_skb(skb);
2042 	}
2043 
2044 	return ret;
2045 }
2046 
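/* Installs a pairwise or group key for a peer. The key material is
 * carried in a WMI_TAG_ARRAY_BYTE TLV (see the alignment comment
 * below), and arg->key_rsc_counter, when non-zero, seeds the receive
 * sequence (replay) counter for the installed key.
 */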
2047 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2048 				struct wmi_vdev_install_key_arg *arg)
2049 {
2050 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2051 	struct wmi_vdev_install_key_cmd *cmd;
2052 	struct wmi_tlv *tlv;
2053 	struct sk_buff *skb;
2054 	int ret, len, key_len_aligned;
2055 
2056 	/* The WMI_TAG_ARRAY_BYTE TLV carrying the key needs to be aligned
2057 	 * to 4 bytes; the actual key length is specified in cmd->key_len.
2058 	 */
2059 	key_len_aligned = roundup(arg->key_len, 4);
2060 
2061 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2062 
2063 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2064 	if (!skb)
2065 		return -ENOMEM;
2066 
2067 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2068 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2069 						 sizeof(*cmd));
2070 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2071 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2072 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2073 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2074 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2075 	cmd->key_len = cpu_to_le32(arg->key_len);
2076 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2077 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2078 
2079 	if (arg->key_rsc_counter)
2080 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2081 
2082 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2083 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2084 	memcpy(tlv->value, arg->key_data, arg->key_len);
2085 
2086 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2087 		   "WMI vdev install key idx %d cipher %d len %d\n",
2088 		   arg->key_idx, arg->key_cipher, arg->key_len);
2089 
2090 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2091 	if (ret) {
2092 		ath12k_warn(ar->ab,
2093 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2094 		dev_kfree_skb(skb);
2095 	}
2096 
2097 	return ret;
2098 }
2099 
2100 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2101 				       struct ath12k_wmi_peer_assoc_arg *arg,
2102 				       bool hw_crypto_disabled)
2103 {
2104 	cmd->peer_flags = 0;
2105 	cmd->peer_flags_ext = 0;
2106 
2107 	if (arg->is_wme_set) {
2108 		if (arg->qos_flag)
2109 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2110 		if (arg->apsd_flag)
2111 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2112 		if (arg->ht_flag)
2113 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2114 		if (arg->bw_40)
2115 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2116 		if (arg->bw_80)
2117 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2118 		if (arg->bw_160)
2119 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2120 		if (arg->bw_320)
2121 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2122 
2123 		/* Typically if STBC is enabled for VHT it should be enabled
2124 		 * for HT as well.
2125 		 */
2126 		if (arg->stbc_flag)
2127 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2128 
2129 		/* Typically if LDPC is enabled for VHT it should be enabled
2130 		 * for HT as well.
2131 		 */
2132 		if (arg->ldpc_flag)
2133 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2134 
2135 		if (arg->static_mimops_flag)
2136 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2137 		if (arg->dynamic_mimops_flag)
2138 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2139 		if (arg->spatial_mux_flag)
2140 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2141 		if (arg->vht_flag)
2142 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2143 		if (arg->he_flag)
2144 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2145 		if (arg->twt_requester)
2146 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2147 		if (arg->twt_responder)
2148 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2149 		if (arg->eht_flag)
2150 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2151 	}
2152 
2153 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2154 	 * (during re-association).
2155 	 * Authorization will be done for these modes on key installation.
2156 	 */
2157 	if (arg->auth_flag)
2158 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2159 	if (arg->need_ptk_4_way) {
2160 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2161 		if (!hw_crypto_disabled && arg->is_assoc)
2162 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2163 	}
2164 	if (arg->need_gtk_2_way)
2165 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2166 	/* safe mode bypass the 4-way handshake */
2167 	if (arg->safe_mode_enabled)
2168 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2169 						 WMI_PEER_NEED_GTK_2_WAY));
2170 
2171 	if (arg->is_pmf_enabled)
2172 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2173 
2174 	/* Disable AMSDU for station transmit, if user configures it.
2175 	 * Disable AMSDU for AP transmit to 11n stations, if user
2176 	 * configures it:
2177 	 * if (arg->amsdu_disable) - add once FW support is available.
2178 	 */
2179 
2180 	/* Target asserts if a node is marked HT while all its MCS rates
2181 	 * are set to 0. Mark the node as non-HT if all the MCS rates are
2182 	 * disabled through iwpriv.
2183 	 */
2184 	if (arg->peer_ht_rates.num_rates == 0)
2185 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2186 }
2187 
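/* The peer assoc message is the fixed TLV followed by, in order: a byte
 * array of legacy rates, a byte array of HT rates, a VHT rate set
 * struct, an array of HE rate set structs, an array holding the
 * optional MLO params, an array of EHT rate set structs and an array of
 * MLO partner links. The two MLO arrays must be present even when MLO
 * is disabled, hence the 2 * TLV_HDR_SIZE added to len below.
 */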
2188 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2189 				   struct ath12k_wmi_peer_assoc_arg *arg)
2190 {
2191 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2192 	struct wmi_peer_assoc_complete_cmd *cmd;
2193 	struct ath12k_wmi_vht_rate_set_params *mcs;
2194 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2195 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2196 	struct wmi_peer_assoc_mlo_params *ml_params;
2197 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2198 	struct sk_buff *skb;
2199 	struct wmi_tlv *tlv;
2200 	void *ptr;
2201 	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
2202 	u32 peer_ht_rates_align, eml_trans_timeout;
2203 	int i, ret, len;
2204 	u16 eml_cap;
2205 	__le32 v;
2206 
2207 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2208 					  sizeof(u32));
2209 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2210 				      sizeof(u32));
2211 
2212 	len = sizeof(*cmd) +
2213 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2214 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2215 	      sizeof(*mcs) + TLV_HDR_SIZE +
2216 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2217 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2218 
2219 	if (arg->ml.enabled)
2220 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2221 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2222 	else
2223 		len += (2 * TLV_HDR_SIZE);
2224 
2225 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2226 	if (!skb)
2227 		return -ENOMEM;
2228 
2229 	ptr = skb->data;
2230 
2231 	cmd = ptr;
2232 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2233 						 sizeof(*cmd));
2234 
2235 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2236 
2237 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2238 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2239 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2240 
2241 	ath12k_wmi_copy_peer_flags(cmd, arg,
2242 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2243 					    &ar->ab->dev_flags));
2244 
2245 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2246 
2247 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2248 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2249 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2250 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2251 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2252 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2253 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2254 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2255 
2256 	/* Update 11ax capabilities */
2257 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2258 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2259 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2260 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2261 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2262 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2263 		cmd->peer_he_cap_phy[i] =
2264 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2265 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2266 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2267 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2268 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2269 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2270 
2271 	/* Update 11be capabilities */
2272 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2273 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2274 		       0);
2275 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2276 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2277 		       0);
2278 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2279 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2280 
2281 	/* Update peer legacy rate information */
2282 	ptr += sizeof(*cmd);
2283 
2284 	tlv = ptr;
2285 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2286 
2287 	ptr += TLV_HDR_SIZE;
2288 
2289 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2290 	memcpy(ptr, arg->peer_legacy_rates.rates,
2291 	       arg->peer_legacy_rates.num_rates);
2292 
2293 	/* Update peer HT rate information */
2294 	ptr += peer_legacy_rates_align;
2295 
2296 	tlv = ptr;
2297 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2298 	ptr += TLV_HDR_SIZE;
2299 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2300 	memcpy(ptr, arg->peer_ht_rates.rates,
2301 	       arg->peer_ht_rates.num_rates);
2302 
2303 	/* VHT Rates */
2304 	ptr += peer_ht_rates_align;
2305 
2306 	mcs = ptr;
2307 
2308 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2309 						 sizeof(*mcs));
2310 
2311 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2312 
2313 	/* Update bandwidth-NSS mapping */
2314 	cmd->peer_bw_rxnss_override = 0;
2315 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2316 
2317 	if (arg->vht_capable) {
2318 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2319 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2320 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2321 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2322 	}
2323 
2324 	/* HE Rates */
2325 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2326 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2327 
2328 	ptr += sizeof(*mcs);
2329 
2330 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2331 
2332 	tlv = ptr;
2333 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2334 	ptr += TLV_HDR_SIZE;
2335 
2336 	/* Loop through the HE rate set */
2337 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2338 		he_mcs = ptr;
2339 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2340 							    sizeof(*he_mcs));
2341 
2342 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2343 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2344 		ptr += sizeof(*he_mcs);
2345 	}
2346 
2347 	tlv = ptr;
2348 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2349 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2350 	ptr += TLV_HDR_SIZE;
2351 	if (!len)
2352 		goto skip_ml_params;
2353 
2354 	ml_params = ptr;
2355 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2356 						       len);
2357 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2358 
2359 	if (arg->ml.assoc_link)
2360 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2361 
2362 	if (arg->ml.primary_umac)
2363 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2364 
2365 	if (arg->ml.logical_link_idx_valid)
2366 		ml_params->flags |=
2367 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2368 
2369 	if (arg->ml.peer_id_valid)
2370 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2371 
2372 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2373 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2374 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2375 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2376 
2377 	eml_cap = arg->ml.eml_cap;
2378 	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
2379 		/* Padding delay */
2380 		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
2381 		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
2382 		/* Transition delay */
2383 		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
2384 		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
2385 		/* Transition timeout */
2386 		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
2387 		ml_params->emlsr_trans_timeout_us =
2388 					cpu_to_le32(eml_trans_timeout);
2389 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
2390 			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
2391 			   eml_trans_timeout);
2392 	}
2393 
2394 	ptr += sizeof(*ml_params);
2395 
2396 skip_ml_params:
2397 	/* Loop through the EHT rate set */
2398 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2399 	tlv = ptr;
2400 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2401 	ptr += TLV_HDR_SIZE;
2402 
2403 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2404 		eht_mcs = ptr;
2405 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2406 							     sizeof(*eht_mcs));
2407 
2408 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2409 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2410 		ptr += sizeof(*eht_mcs);
2411 	}
2412 
2413 	/* Update MCS15 capability */
2414 	if (arg->eht_disable_mcs15)
2415 		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
2416 
2417 	tlv = ptr;
2418 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2419 	/* fill ML Partner links */
2420 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2421 	ptr += TLV_HDR_SIZE;
2422 
2423 	if (len == 0)
2424 		goto send;
2425 
2426 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2427 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2428 
2429 		partner_info = ptr;
2430 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2431 								  sizeof(*partner_info));
2432 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2433 		partner_info->hw_link_id =
2434 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2435 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2436 
2437 		if (arg->ml.partner_info[i].assoc_link)
2438 			partner_info->flags |=
2439 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2440 
2441 		if (arg->ml.partner_info[i].primary_umac)
2442 			partner_info->flags |=
2443 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2444 
2445 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2446 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2447 			partner_info->flags |= v;
2448 		}
2449 
2450 		partner_info->logical_link_idx =
2451 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2452 		ptr += sizeof(*partner_info);
2453 	}
2454 
2455 send:
2456 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2457 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
2458 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2459 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2460 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2461 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2462 		   cmd->peer_mpdu_density,
2463 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2464 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2465 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2466 		   cmd->peer_he_cap_phy[2],
2467 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2468 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2469 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2470 		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
2471 
2472 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2473 	if (ret) {
2474 		ath12k_warn(ar->ab,
2475 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2476 		dev_kfree_skb(skb);
2477 	}
2478 
2479 	return ret;
2480 }
2481 
2482 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2483 				struct ath12k_wmi_scan_req_arg *arg)
2484 {
2485 	/* setup commonly used values */
2486 	arg->scan_req_id = 1;
2487 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2488 	arg->dwell_time_active = 50;
2489 	arg->dwell_time_active_2g = 0;
2490 	arg->dwell_time_passive = 150;
2491 	arg->dwell_time_active_6g = 70;
2492 	arg->dwell_time_passive_6g = 70;
2493 	arg->min_rest_time = 50;
2494 	arg->max_rest_time = 500;
2495 	arg->repeat_probe_time = 0;
2496 	arg->probe_spacing_time = 0;
2497 	arg->idle_time = 0;
2498 	arg->max_scan_time = 20000;
2499 	arg->probe_delay = 5;
2500 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2501 				  WMI_SCAN_EVENT_COMPLETED |
2502 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2503 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2504 				  WMI_SCAN_EVENT_DEQUEUED;
2505 	arg->scan_f_chan_stat_evnt = 1;
2506 	arg->num_bssid = 1;
2507 
2508 	/* Fill bssid_list[0] with 0xff, otherwise the BSSID and RA will be
2509 	 * all zeros in the probe request.
2510 	 */
2511 	eth_broadcast_addr(arg->bssid_list[0].addr);
2512 }
2513 
2514 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2515 						   struct ath12k_wmi_scan_req_arg *arg)
2516 {
2517 	/* Scan events subscription */
2518 	if (arg->scan_ev_started)
2519 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2520 	if (arg->scan_ev_completed)
2521 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2522 	if (arg->scan_ev_bss_chan)
2523 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2524 	if (arg->scan_ev_foreign_chan)
2525 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2526 	if (arg->scan_ev_dequeued)
2527 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2528 	if (arg->scan_ev_preempted)
2529 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2530 	if (arg->scan_ev_start_failed)
2531 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2532 	if (arg->scan_ev_restarted)
2533 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2534 	if (arg->scan_ev_foreign_chn_exit)
2535 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2536 	if (arg->scan_ev_suspended)
2537 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2538 	if (arg->scan_ev_resumed)
2539 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2540 
2541 	/* Set scan control flags */
2542 	cmd->scan_ctrl_flags = 0;
2543 	if (arg->scan_f_passive)
2544 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2545 	if (arg->scan_f_strict_passive_pch)
2546 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2547 	if (arg->scan_f_promisc_mode)
2548 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2549 	if (arg->scan_f_capture_phy_err)
2550 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2551 	if (arg->scan_f_half_rate)
2552 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2553 	if (arg->scan_f_quarter_rate)
2554 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2555 	if (arg->scan_f_cck_rates)
2556 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2557 	if (arg->scan_f_ofdm_rates)
2558 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2559 	if (arg->scan_f_chan_stat_evnt)
2560 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2561 	if (arg->scan_f_filter_prb_req)
2562 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2563 	if (arg->scan_f_bcast_probe)
2564 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2565 	if (arg->scan_f_offchan_mgmt_tx)
2566 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2567 	if (arg->scan_f_offchan_data_tx)
2568 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2569 	if (arg->scan_f_force_active_dfs_chn)
2570 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2571 	if (arg->scan_f_add_tpc_ie_in_probe)
2572 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2573 	if (arg->scan_f_add_ds_ie_in_probe)
2574 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2575 	if (arg->scan_f_add_spoofed_mac_in_probe)
2576 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2577 	if (arg->scan_f_add_rand_seq_in_probe)
2578 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2579 	if (arg->scan_f_en_ie_whitelist_in_probe)
2580 		cmd->scan_ctrl_flags |=
2581 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2582 
2583 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2584 						 WMI_SCAN_DWELL_MODE_MASK);
2585 }
2586 
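/* Callers are expected to seed the request with
 * ath12k_wmi_start_scan_init() above and then override only the fields
 * that matter for the scan at hand (scan_id, vdev_id, channel and SSID
 * lists, extra IEs) before invoking this function.
 */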
2587 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2588 				   struct ath12k_wmi_scan_req_arg *arg)
2589 {
2590 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2591 	struct wmi_start_scan_cmd *cmd;
2592 	struct ath12k_wmi_ssid_params *ssid = NULL;
2593 	struct ath12k_wmi_mac_addr_params *bssid;
2594 	struct sk_buff *skb;
2595 	struct wmi_tlv *tlv;
2596 	void *ptr;
2597 	int i, ret, len;
2598 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2599 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2600 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2601 
2602 	len = sizeof(*cmd);
2603 
2604 	len += TLV_HDR_SIZE;
2605 	if (arg->num_chan)
2606 		len += arg->num_chan * sizeof(u32);
2607 
2608 	len += TLV_HDR_SIZE;
2609 	if (arg->num_ssids)
2610 		len += arg->num_ssids * sizeof(*ssid);
2611 
2612 	len += TLV_HDR_SIZE;
2613 	if (arg->num_bssid)
2614 		len += sizeof(*bssid) * arg->num_bssid;
2615 
2616 	if (arg->num_hint_bssid)
2617 		len += TLV_HDR_SIZE +
2618 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2619 
2620 	if (arg->num_hint_s_ssid)
2621 		len += TLV_HDR_SIZE +
2622 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2623 
2624 	len += TLV_HDR_SIZE;
2625 	if (arg->extraie.len)
2626 		extraie_len_with_pad =
2627 			roundup(arg->extraie.len, sizeof(u32));
2628 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2629 		len += extraie_len_with_pad;
2630 	} else {
2631 		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2632 			    arg->extraie.len);
2633 		extraie_len_with_pad = 0;
2634 	}
2635 
2636 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2637 	if (!skb)
2638 		return -ENOMEM;
2639 
2640 	ptr = skb->data;
2641 
2642 	cmd = ptr;
2643 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2644 						 sizeof(*cmd));
2645 
2646 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2647 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2648 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2649 	if (ar->state_11d == ATH12K_11D_PREPARING)
2650 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2651 	else
2652 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2653 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2654 
2655 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2656 
2657 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2658 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2659 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2660 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2661 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2662 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2663 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2664 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2665 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2666 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2667 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2668 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2669 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2670 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2671 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2672 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2673 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2674 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2675 
2676 	ptr += sizeof(*cmd);
2677 
2678 	len = arg->num_chan * sizeof(u32);
2679 
2680 	tlv = ptr;
2681 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2682 	ptr += TLV_HDR_SIZE;
2683 	tmp_ptr = (u32 *)ptr;
2684 
2685 	memcpy(tmp_ptr, arg->chan_list, len);
2686 
2687 	ptr += len;
2688 
2689 	len = arg->num_ssids * sizeof(*ssid);
2690 	tlv = ptr;
2691 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2692 
2693 	ptr += TLV_HDR_SIZE;
2694 
2695 	if (arg->num_ssids) {
2696 		ssid = ptr;
2697 		for (i = 0; i < arg->num_ssids; ++i) {
2698 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2699 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2700 			       arg->ssid[i].ssid_len);
2701 			ssid++;
2702 		}
2703 	}
2704 
2705 	ptr += (arg->num_ssids * sizeof(*ssid));
2706 	len = arg->num_bssid * sizeof(*bssid);
2707 	tlv = ptr;
2708 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2709 
2710 	ptr += TLV_HDR_SIZE;
2711 	bssid = ptr;
2712 
2713 	if (arg->num_bssid) {
2714 		for (i = 0; i < arg->num_bssid; ++i) {
2715 			ether_addr_copy(bssid->addr,
2716 					arg->bssid_list[i].addr);
2717 			bssid++;
2718 		}
2719 	}
2720 
2721 	ptr += arg->num_bssid * sizeof(*bssid);
2722 
2723 	len = extraie_len_with_pad;
2724 	tlv = ptr;
2725 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2726 	ptr += TLV_HDR_SIZE;
2727 
2728 	if (extraie_len_with_pad)
2729 		memcpy(ptr, arg->extraie.ptr,
2730 		       arg->extraie.len);
2731 
2732 	ptr += extraie_len_with_pad;
2733 
2734 	if (arg->num_hint_s_ssid) {
2735 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2736 		tlv = ptr;
2737 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2738 		ptr += TLV_HDR_SIZE;
2739 		s_ssid = ptr;
2740 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2741 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2742 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2743 			s_ssid++;
2744 		}
2745 		ptr += len;
2746 	}
2747 
2748 	if (arg->num_hint_bssid) {
2749 		len = arg->num_hint_bssid * sizeof(*hint_bssid);
2750 		tlv = ptr;
2751 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2752 		ptr += TLV_HDR_SIZE;
2753 		hint_bssid = ptr;
2754 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2755 			hint_bssid->freq_flags =
2756 				arg->hint_bssid[i].freq_flags;
2757 			ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0],
2758 					&hint_bssid->bssid.addr[0]);
2759 			hint_bssid++;
2760 		}
2761 	}
2762 
2763 	ret = ath12k_wmi_cmd_send(wmi, skb,
2764 				  WMI_START_SCAN_CMDID);
2765 	if (ret) {
2766 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2767 		dev_kfree_skb(skb);
2768 	}
2769 
2770 	return ret;
2771 }
2772 
2773 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2774 				  struct ath12k_wmi_scan_cancel_arg *arg)
2775 {
2776 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2777 	struct wmi_stop_scan_cmd *cmd;
2778 	struct sk_buff *skb;
2779 	int ret;
2780 
2781 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2782 	if (!skb)
2783 		return -ENOMEM;
2784 
2785 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2786 
2787 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2788 						 sizeof(*cmd));
2789 
2790 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2791 	cmd->requestor = cpu_to_le32(arg->requester);
2792 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2793 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2794 	/* stop the scan with the corresponding scan_id */
2795 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2796 		/* Cancelling all scans */
2797 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2798 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2799 		/* Cancelling VAP scans */
2800 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2801 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2802 		/* Cancelling specific scan */
2803 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2804 	} else {
2805 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
2806 			    arg->req_type);
2807 		dev_kfree_skb(skb);
2808 		return -EINVAL;
2809 	}
2810 
2811 	ret = ath12k_wmi_cmd_send(wmi, skb,
2812 				  WMI_STOP_SCAN_CMDID);
2813 	if (ret) {
2814 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2815 		dev_kfree_skb(skb);
2816 	}
2817 
2818 	return ret;
2819 }
2820 
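/* The full channel list may exceed the maximum WMI message size, so it
 * is sent in chunks: each iteration packs as many WMI_TAG_CHANNEL
 * structs as fit within max_msg_len, and every message after the first
 * sets WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so that the firmware
 * appends to, rather than replaces, the list sent so far.
 */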
2821 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2822 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2823 {
2824 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2825 	struct wmi_scan_chan_list_cmd *cmd;
2826 	struct sk_buff *skb;
2827 	struct ath12k_wmi_channel_params *chan_info;
2828 	struct ath12k_wmi_channel_arg *channel_arg;
2829 	struct wmi_tlv *tlv;
2830 	void *ptr;
2831 	int i, ret, len;
2832 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2833 	__le32 *reg1, *reg2;
2834 
2835 	channel_arg = &arg->channel[0];
2836 	while (arg->nallchans) {
2837 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2838 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2839 			sizeof(*chan_info);
2840 
2841 		num_send_chans = min(arg->nallchans, max_chan_limit);
2842 
2843 		arg->nallchans -= num_send_chans;
2844 		len += sizeof(*chan_info) * num_send_chans;
2845 
2846 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2847 		if (!skb)
2848 			return -ENOMEM;
2849 
2850 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2851 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2852 							 sizeof(*cmd));
2853 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2854 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2855 		if (num_sends)
2856 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2857 
2858 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2859 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2860 			   num_send_chans, len, cmd->pdev_id, num_sends);
2861 
2862 		ptr = skb->data + sizeof(*cmd);
2863 
2864 		len = sizeof(*chan_info) * num_send_chans;
2865 		tlv = ptr;
2866 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
2867 						 len);
2868 		ptr += TLV_HDR_SIZE;
2869 
2870 		for (i = 0; i < num_send_chans; ++i) {
2871 			chan_info = ptr;
2872 			memset(chan_info, 0, sizeof(*chan_info));
2873 			len = sizeof(*chan_info);
2874 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2875 								       len);
2876 
2877 			reg1 = &chan_info->reg_info_1;
2878 			reg2 = &chan_info->reg_info_2;
2879 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2880 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2881 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2882 
2883 			if (channel_arg->is_chan_passive)
2884 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2885 			if (channel_arg->allow_he)
2886 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2887 			else if (channel_arg->allow_vht)
2888 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2889 			else if (channel_arg->allow_ht)
2890 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2891 			if (channel_arg->half_rate)
2892 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2893 			if (channel_arg->quarter_rate)
2894 				chan_info->info |=
2895 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2896 
2897 			if (channel_arg->psc_channel)
2898 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2899 
2900 			if (channel_arg->dfs_set)
2901 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2902 
2903 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2904 							    WMI_CHAN_INFO_MODE);
2905 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2906 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2907 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2908 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2909 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2910 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2911 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2912 						  WMI_CHAN_REG_INFO1_REG_CLS);
2913 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2914 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2915 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2916 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2917 
2918 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2919 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2920 				   i, chan_info->mhz, chan_info->info);
2921 
2922 			ptr += sizeof(*chan_info);
2923 
2924 			channel_arg++;
2925 		}
2926 
2927 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2928 		if (ret) {
2929 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2930 			dev_kfree_skb(skb);
2931 			return ret;
2932 		}
2933 
2934 		num_sends++;
2935 	}
2936 
2937 	return 0;
2938 }
2939 
2940 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2941 				   struct wmi_wmm_params_all_arg *param)
2942 {
2943 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2944 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2945 	struct wmi_wmm_params *wmm_param;
2946 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2947 	struct sk_buff *skb;
2948 	int ret, ac;
2949 
2950 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2951 	if (!skb)
2952 		return -ENOMEM;
2953 
2954 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2955 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2956 						 sizeof(*cmd));
2957 
2958 	cmd->vdev_id = cpu_to_le32(vdev_id);
2959 	cmd->wmm_param_type = 0;
2960 
2961 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2962 		switch (ac) {
2963 		case WME_AC_BE:
2964 			wmi_wmm_arg = &param->ac_be;
2965 			break;
2966 		case WME_AC_BK:
2967 			wmi_wmm_arg = &param->ac_bk;
2968 			break;
2969 		case WME_AC_VI:
2970 			wmi_wmm_arg = &param->ac_vi;
2971 			break;
2972 		case WME_AC_VO:
2973 			wmi_wmm_arg = &param->ac_vo;
2974 			break;
2975 		}
2976 
2977 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2978 		wmm_param->tlv_header =
2979 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2980 					       sizeof(*wmm_param));
2981 
2982 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2983 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2984 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2985 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2986 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2987 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2988 
2989 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2990 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2991 			   ac, wmm_param->aifs, wmm_param->cwmin,
2992 			   wmm_param->cwmax, wmm_param->txoplimit,
2993 			   wmm_param->acm, wmm_param->no_ack);
2994 	}
2995 	ret = ath12k_wmi_cmd_send(wmi, skb,
2996 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2997 	if (ret) {
2998 		ath12k_warn(ar->ab,
2999 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
3000 		dev_kfree_skb(skb);
3001 	}
3002 
3003 	return ret;
3004 }
3005 
3006 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
3007 						  u32 pdev_id)
3008 {
3009 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3010 	struct wmi_dfs_phyerr_offload_cmd *cmd;
3011 	struct sk_buff *skb;
3012 	int ret;
3013 
3014 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3015 	if (!skb)
3016 		return -ENOMEM;
3017 
3018 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
3019 	cmd->tlv_header =
3020 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
3021 				       sizeof(*cmd));
3022 
3023 	cmd->pdev_id = cpu_to_le32(pdev_id);
3024 
3025 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3026 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
3027 
3028 	ret = ath12k_wmi_cmd_send(wmi, skb,
3029 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
3030 	if (ret) {
3031 		ath12k_warn(ar->ab,
3032 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
3033 		dev_kfree_skb(skb);
3034 	}
3035 
3036 	return ret;
3037 }
3038 
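/* Generic BIOS/ACPI parameter push: wraps an opaque buffer identified
 * by param_id into a single byte-array TLV addressed to the SoC pdev
 * (WMI_PDEV_ID_SOC).
 */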
3039 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
3040 			    const u8 *buf, size_t buf_len)
3041 {
3042 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3043 	struct wmi_pdev_set_bios_interface_cmd *cmd;
3044 	struct wmi_tlv *tlv;
3045 	struct sk_buff *skb;
3046 	u8 *ptr;
3047 	u32 len, len_aligned;
3048 	int ret;
3049 
3050 	len_aligned = roundup(buf_len, sizeof(u32));
3051 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
3052 
3053 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3054 	if (!skb)
3055 		return -ENOMEM;
3056 
3057 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3058 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3059 						 sizeof(*cmd));
3060 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3061 	cmd->param_type_id = cpu_to_le32(param_id);
3062 	cmd->length = cpu_to_le32(buf_len);
3063 
3064 	ptr = skb->data + sizeof(*cmd);
3065 	tlv = (struct wmi_tlv *)ptr;
3066 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3067 	ptr += TLV_HDR_SIZE;
3068 	memcpy(ptr, buf, buf_len);
3069 
3070 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3071 				  skb,
3072 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3073 	if (ret) {
3074 		ath12k_warn(ab,
3075 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3076 			    param_id, ret);
3077 		dev_kfree_skb(skb);
3078 	}
3079 
3080 	return ret;
3081 }
3082 
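/* Pushes the ACPI-provided SAR power limit table to the firmware as two
 * byte-array TLVs: the SAR table itself followed by the DBS backoff
 * table, both read at fixed offsets within the caller's buffer and
 * padded to 4 bytes.
 */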
3083 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3084 {
3085 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3086 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3087 	struct wmi_tlv *tlv;
3088 	struct sk_buff *skb;
3089 	int ret;
3090 	u8 *buf_ptr;
3091 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3092 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3093 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3094 
3095 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3096 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3097 					      sizeof(u32));
3098 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3099 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3100 
3101 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3102 	if (!skb)
3103 		return -ENOMEM;
3104 
3105 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3106 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3107 						 sizeof(*cmd));
3108 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3109 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3110 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3111 
3112 	buf_ptr = skb->data + sizeof(*cmd);
3113 	tlv = (struct wmi_tlv *)buf_ptr;
3114 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3115 					 sar_table_len_aligned);
3116 	buf_ptr += TLV_HDR_SIZE;
3117 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3118 
3119 	buf_ptr += sar_table_len_aligned;
3120 	tlv = (struct wmi_tlv *)buf_ptr;
3121 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3122 					 sar_dbs_backoff_len_aligned);
3123 	buf_ptr += TLV_HDR_SIZE;
3124 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3125 
3126 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3127 				  skb,
3128 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3129 	if (ret) {
3130 		ath12k_warn(ab,
3131 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n",
3132 			    ret);
3133 		dev_kfree_skb(skb);
3134 	}
3135 
3136 	return ret;
3137 }
3138 
3139 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3140 {
3141 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3142 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3143 	struct wmi_tlv *tlv;
3144 	struct sk_buff *skb;
3145 	int ret;
3146 	u8 *buf_ptr;
3147 	u32 len, sar_geo_len_aligned;
3148 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3149 
3150 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3151 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3152 
3153 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3154 	if (!skb)
3155 		return -ENOMEM;
3156 
3157 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3158 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3159 						 sizeof(*cmd));
3160 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3161 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3162 
3163 	buf_ptr = skb->data + sizeof(*cmd);
3164 	tlv = (struct wmi_tlv *)buf_ptr;
3165 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3166 	buf_ptr += TLV_HDR_SIZE;
3167 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3168 
3169 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3170 				  skb,
3171 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3172 	if (ret) {
3173 		ath12k_warn(ab,
3174 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3175 			    ret);
3176 		dev_kfree_skb(skb);
3177 	}
3178 
3179 	return ret;
3180 }
3181 
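/* Tears down a BlockAck session for the given peer/TID by having the
 * target send a DELBA action frame; 'initiator' selects whether the
 * originator or the recipient side is torn down and 'reason' is the
 * 802.11 reason code carried in the frame.
 */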
3182 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3183 			  u32 tid, u32 initiator, u32 reason)
3184 {
3185 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3186 	struct wmi_delba_send_cmd *cmd;
3187 	struct sk_buff *skb;
3188 	int ret;
3189 
3190 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3191 	if (!skb)
3192 		return -ENOMEM;
3193 
3194 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3195 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3196 						 sizeof(*cmd));
3197 	cmd->vdev_id = cpu_to_le32(vdev_id);
3198 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3199 	cmd->tid = cpu_to_le32(tid);
3200 	cmd->initiator = cpu_to_le32(initiator);
3201 	cmd->reasoncode = cpu_to_le32(reason);
3202 
3203 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3204 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3205 		   vdev_id, mac, tid, initiator, reason);
3206 
3207 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3208 
3209 	if (ret) {
3210 		ath12k_warn(ar->ab,
3211 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3212 		dev_kfree_skb(skb);
3213 	}
3214 
3215 	return ret;
3216 }
3217 
3218 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3219 			      u32 tid, u32 status)
3220 {
3221 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3222 	struct wmi_addba_setresponse_cmd *cmd;
3223 	struct sk_buff *skb;
3224 	int ret;
3225 
3226 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3227 	if (!skb)
3228 		return -ENOMEM;
3229 
3230 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3231 	cmd->tlv_header =
3232 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3233 				       sizeof(*cmd));
3234 	cmd->vdev_id = cpu_to_le32(vdev_id);
3235 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3236 	cmd->tid = cpu_to_le32(tid);
3237 	cmd->statuscode = cpu_to_le32(status);
3238 
3239 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3240 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3241 		   vdev_id, mac, tid, status);
3242 
3243 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3244 
3245 	if (ret) {
3246 		ath12k_warn(ar->ab,
3247 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3248 		dev_kfree_skb(skb);
3249 	}
3250 
3251 	return ret;
3252 }
3253 
3254 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3255 			  u32 tid, u32 buf_size)
3256 {
3257 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3258 	struct wmi_addba_send_cmd *cmd;
3259 	struct sk_buff *skb;
3260 	int ret;
3261 
3262 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3263 	if (!skb)
3264 		return -ENOMEM;
3265 
3266 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3267 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3268 						 sizeof(*cmd));
3269 	cmd->vdev_id = cpu_to_le32(vdev_id);
3270 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3271 	cmd->tid = cpu_to_le32(tid);
3272 	cmd->buffersize = cpu_to_le32(buf_size);
3273 
3274 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3275 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3276 		   vdev_id, mac, tid, buf_size);
3277 
3278 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3279 
3280 	if (ret) {
3281 		ath12k_warn(ar->ab,
3282 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3283 		dev_kfree_skb(skb);
3284 	}
3285 
3286 	return ret;
3287 }
3288 
3289 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3290 {
3291 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3292 	struct wmi_addba_clear_resp_cmd *cmd;
3293 	struct sk_buff *skb;
3294 	int ret;
3295 
3296 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3297 	if (!skb)
3298 		return -ENOMEM;
3299 
3300 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3301 	cmd->tlv_header =
3302 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3303 				       sizeof(*cmd));
3304 	cmd->vdev_id = cpu_to_le32(vdev_id);
3305 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3306 
3307 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3308 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3309 		   vdev_id, mac);
3310 
3311 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3312 
3313 	if (ret) {
3314 		ath12k_warn(ar->ab,
3315 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3316 		dev_kfree_skb(skb);
3317 	}
3318 
3319 	return ret;
3320 }
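
/* A hypothetical responder-side block ack flow built from the helpers
 * above (a sketch only; vdev_id, sta_mac, tid, initiator and reason are
 * the caller's, as is the error handling):
 *
 *	ret = ath12k_wmi_addba_set_resp(ar, vdev_id, sta_mac, tid,
 *					WLAN_STATUS_SUCCESS);
 *	...
 *	ret = ath12k_wmi_delba_send(ar, vdev_id, sta_mac, tid,
 *				    initiator, reason);
 *	...
 *	ret = ath12k_wmi_addba_clear_resp(ar, vdev_id, sta_mac);
 */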
3321 
3322 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3323 				     struct ath12k_wmi_init_country_arg *arg)
3324 {
3325 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3326 	struct wmi_init_country_cmd *cmd;
3327 	struct sk_buff *skb;
3328 	int ret;
3329 
3330 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3331 	if (!skb)
3332 		return -ENOMEM;
3333 
3334 	cmd = (struct wmi_init_country_cmd *)skb->data;
3335 	cmd->tlv_header =
3336 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3337 				       sizeof(*cmd));
3338 
3339 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3340 
3341 	switch (arg->flags) {
3342 	case ALPHA_IS_SET:
3343 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3344 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3345 		break;
3346 	case CC_IS_SET:
3347 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3348 		cmd->cc_info.country_code =
3349 			cpu_to_le32(arg->cc_info.country_code);
3350 		break;
3351 	case REGDMN_IS_SET:
3352 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3353 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3354 		break;
3355 	default:
3356 		ret = -EINVAL;
3357 		goto out;
3358 	}
3359 
3360 	ret = ath12k_wmi_cmd_send(wmi, skb,
3361 				  WMI_SET_INIT_COUNTRY_CMDID);
3362 
3363 out:
3364 	if (ret) {
3365 		ath12k_warn(ar->ab,
3366 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3367 			    ret);
3368 		dev_kfree_skb(skb);
3369 	}
3370 
3371 	return ret;
3372 }
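
/* A minimal (hypothetical) caller selecting the alpha2 arm of the
 * country-info union consumed above:
 *
 *	struct ath12k_wmi_init_country_arg arg = {
 *		.flags = ALPHA_IS_SET,
 *	};
 *
 *	memcpy(arg.cc_info.alpha2, "US", 2);
 *	ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
 */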
3373 
3374 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3375 					    struct wmi_set_current_country_arg *arg)
3376 {
3377 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3378 	struct wmi_set_current_country_cmd *cmd;
3379 	struct sk_buff *skb;
3380 	int ret;
3381 
3382 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3383 	if (!skb)
3384 		return -ENOMEM;
3385 
3386 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3387 	cmd->tlv_header =
3388 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3389 				       sizeof(*cmd));
3390 
3391 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3392 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3393 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3394 
3395 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3396 		   "set current country pdev id %d alpha2 %c%c\n",
3397 		   ar->pdev->pdev_id,
3398 		   arg->alpha2[0],
3399 		   arg->alpha2[1]);
3400 
3401 	if (ret) {
3402 		ath12k_warn(ar->ab,
3403 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3404 		dev_kfree_skb(skb);
3405 	}
3406 
3407 	return ret;
3408 }
3409 
3410 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3411 				       struct wmi_11d_scan_start_arg *arg)
3412 {
3413 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3414 	struct wmi_11d_scan_start_cmd *cmd;
3415 	struct sk_buff *skb;
3416 	int ret;
3417 
3418 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3419 	if (!skb)
3420 		return -ENOMEM;
3421 
3422 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3423 	cmd->tlv_header =
3424 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3425 				       sizeof(*cmd));
3426 
3427 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3428 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3429 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3430 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3431 
3432 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3433 		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
3434 		   arg->vdev_id, arg->scan_period_msec,
3435 		   arg->start_interval_msec);
3436 
3437 	if (ret) {
3438 		ath12k_warn(ar->ab,
3439 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3440 		dev_kfree_skb(skb);
3441 	}
3442 
3443 	return ret;
3444 }
3445 
3446 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
3447 {
3448 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3449 	struct wmi_11d_scan_stop_cmd *cmd;
3450 	struct sk_buff *skb;
3451 	int ret;
3452 
3453 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3454 	if (!skb)
3455 		return -ENOMEM;
3456 
3457 	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
3458 	cmd->tlv_header =
3459 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
3460 				       sizeof(*cmd));
3461 
3462 	cmd->vdev_id = cpu_to_le32(vdev_id);
3463 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
3464 
3465 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3466 		   "send 11d scan stop vdev id %d\n",
3467 		   vdev_id);
3468 
3469 	if (ret) {
3470 		ath12k_warn(ar->ab,
3471 			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3472 		dev_kfree_skb(skb);
3473 	}
3474 
3475 	return ret;
3476 }
3477 
3478 int
3479 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3480 {
3481 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3482 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3483 	struct wmi_twt_enable_params_cmd *cmd;
3484 	struct sk_buff *skb;
3485 	int ret, len;
3486 
3487 	len = sizeof(*cmd);
3488 
3489 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3490 	if (!skb)
3491 		return -ENOMEM;
3492 
3493 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3494 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3495 						 len);
3496 	cmd->pdev_id = cpu_to_le32(pdev_id);
3497 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3498 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3499 	cmd->congestion_thresh_setup =
3500 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3501 	cmd->congestion_thresh_teardown =
3502 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3503 	cmd->congestion_thresh_critical =
3504 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3505 	cmd->interference_thresh_teardown =
3506 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3507 	cmd->interference_thresh_setup =
3508 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3509 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3510 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3511 	cmd->no_of_bcast_mcast_slots =
3512 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3513 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3514 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3515 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3516 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3517 	cmd->remove_sta_slot_interval =
3518 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3519 	/* TODO add MBSSID support */
3520 	cmd->mbss_support = 0;
3521 
3522 	ret = ath12k_wmi_cmd_send(wmi, skb,
3523 				  WMI_TWT_ENABLE_CMDID);
3524 	if (ret) {
3525 		ath12k_warn(ab, "failed to send WMI_TWT_ENABLE_CMDID\n");
3526 		dev_kfree_skb(skb);
3527 	}
3528 	return ret;
3529 }
3530 
3531 int
3532 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3533 {
3534 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3535 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3536 	struct wmi_twt_disable_params_cmd *cmd;
3537 	struct sk_buff *skb;
3538 	int ret, len;
3539 
3540 	len = sizeof(*cmd);
3541 
3542 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3543 	if (!skb)
3544 		return -ENOMEM;
3545 
3546 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3547 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3548 						 len);
3549 	cmd->pdev_id = cpu_to_le32(pdev_id);
3550 
3551 	ret = ath12k_wmi_cmd_send(wmi, skb,
3552 				  WMI_TWT_DISABLE_CMDID);
3553 	if (ret) {
3554 		ath12k_warn(ab, "failed to send WMI_TWT_DISABLE_CMDID\n");
3555 		dev_kfree_skb(skb);
3556 	}
3557 	return ret;
3558 }
3559 
3560 int
3561 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3562 			     struct ieee80211_he_obss_pd *he_obss_pd)
3563 {
3564 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3565 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3566 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3567 	struct sk_buff *skb;
3568 	int ret, len;
3569 
3570 	len = sizeof(*cmd);
3571 
3572 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3573 	if (!skb)
3574 		return -ENOMEM;
3575 
3576 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3577 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3578 						 len);
3579 	cmd->vdev_id = cpu_to_le32(vdev_id);
3580 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
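	/* obss_min/obss_max are signed values on the WMI interface, hence
	 * the signed little-endian helper rather than cpu_to_le32().
	 */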
3581 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3582 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3583 
3584 	ret = ath12k_wmi_cmd_send(wmi, skb,
3585 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3586 	if (ret) {
3587 		ath12k_warn(ab,
3588 			    "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
3589 		dev_kfree_skb(skb);
3590 	}
3591 	return ret;
3592 }
3593 
3594 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3595 				  u8 bss_color, u32 period,
3596 				  bool enable)
3597 {
3598 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3599 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3600 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3601 	struct sk_buff *skb;
3602 	int ret, len;
3603 
3604 	len = sizeof(*cmd);
3605 
3606 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3607 	if (!skb)
3608 		return -ENOMEM;
3609 
3610 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3611 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3612 						 len);
3613 	cmd->vdev_id = cpu_to_le32(vdev_id);
3614 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3615 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3616 	cmd->current_bss_color = cpu_to_le32(bss_color);
3617 	cmd->detection_period_ms = cpu_to_le32(period);
3618 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3619 	cmd->free_slot_expiry_time_ms = 0;
3620 	cmd->flags = 0;
3621 
3622 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3623 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3624 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color,
3625 		   period, ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3626 
3627 	ret = ath12k_wmi_cmd_send(wmi, skb,
3628 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3629 	if (ret) {
3630 		ath12k_warn(ab, "failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
3631 		dev_kfree_skb(skb);
3632 	}
3633 	return ret;
3634 }
3635 
3636 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3637 						bool enable)
3638 {
3639 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3640 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3641 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3642 	struct sk_buff *skb;
3643 	int ret, len;
3644 
3645 	len = sizeof(*cmd);
3646 
3647 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3648 	if (!skb)
3649 		return -ENOMEM;
3650 
3651 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3652 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3653 						 len);
3654 	cmd->vdev_id = cpu_to_le32(vdev_id);
3655 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3656 
3657 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3658 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3659 		   vdev_id, enable);
3660 
3661 	ret = ath12k_wmi_cmd_send(wmi, skb,
3662 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3663 	if (ret) {
3664 		ath12k_warn(ab, "failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
3665 		dev_kfree_skb(skb);
3666 	}
3667 	return ret;
3668 }
3669 
3670 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3671 				   struct sk_buff *tmpl)
3672 {
3673 	struct wmi_tlv *tlv;
3674 	struct sk_buff *skb;
3675 	void *ptr;
3676 	int ret, len;
3677 	size_t aligned_len;
3678 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3679 
3680 	aligned_len = roundup(tmpl->len, 4);
3681 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3682 
3683 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3684 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3685 
3686 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3687 	if (!skb)
3688 		return -ENOMEM;
3689 
3690 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3691 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3692 						 sizeof(*cmd));
3693 	cmd->vdev_id = cpu_to_le32(vdev_id);
3694 	cmd->buf_len = cpu_to_le32(tmpl->len);
3695 	ptr = skb->data + sizeof(*cmd);
3696 
3697 	tlv = ptr;
3698 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3699 	memcpy(tlv->value, tmpl->data, tmpl->len);
3700 
3701 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3702 	if (ret) {
3703 		ath12k_warn(ar->ab,
3704 			    "WMI vdev %i failed to send FILS discovery template command\n",
3705 			    vdev_id);
3706 		dev_kfree_skb(skb);
3707 	}
3708 	return ret;
3709 }
3710 
3711 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3712 			       struct sk_buff *tmpl)
3713 {
3714 	struct wmi_probe_tmpl_cmd *cmd;
3715 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3716 	struct wmi_tlv *tlv;
3717 	struct sk_buff *skb;
3718 	void *ptr;
3719 	int ret, len;
3720 	size_t aligned_len = roundup(tmpl->len, 4);
3721 
3722 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3723 		   "WMI vdev %i set probe response template\n", vdev_id);
3724 
3725 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3726 
3727 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3728 	if (!skb)
3729 		return -ENOMEM;
3730 
3731 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3732 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3733 						 sizeof(*cmd));
3734 	cmd->vdev_id = cpu_to_le32(vdev_id);
3735 	cmd->buf_len = cpu_to_le32(tmpl->len);
3736 
3737 	ptr = skb->data + sizeof(*cmd);
3738 
3739 	probe_info = ptr;
3740 	len = sizeof(*probe_info);
3741 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3742 							len);
3743 	probe_info->caps = 0;
3744 	probe_info->erp = 0;
3745 
3746 	ptr += sizeof(*probe_info);
3747 
3748 	tlv = ptr;
3749 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3750 	memcpy(tlv->value, tmpl->data, tmpl->len);
3751 
3752 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3753 	if (ret) {
3754 		ath12k_warn(ar->ab,
3755 			    "WMI vdev %i failed to send probe response template command\n",
3756 			    vdev_id);
3757 		dev_kfree_skb(skb);
3758 	}
3759 	return ret;
3760 }
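
/* Buffer layout produced above, mirroring the length computation: the
 * probe-info TLV sits between the fixed command and the raw template
 * (a sketch of the skb contents):
 *
 *	[wmi_probe_tmpl_cmd]
 *	[ath12k_wmi_bcn_prb_info_params]	(caps and erp left at zero)
 *	[WMI_TAG_ARRAY_BYTE hdr][template, padded to a 4-byte boundary]
 */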
3761 
3762 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3763 			      bool unsol_bcast_probe_resp_enabled)
3764 {
3765 	struct sk_buff *skb;
3766 	int ret, len;
3767 	struct wmi_fils_discovery_cmd *cmd;
3768 
3769 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3770 		   "WMI vdev %i set %s interval to %u TU\n",
3771 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3772 		   "unsolicited broadcast probe response" : "FILS discovery",
3773 		   interval);
3774 
3775 	len = sizeof(*cmd);
3776 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3777 	if (!skb)
3778 		return -ENOMEM;
3779 
3780 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3781 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3782 						 len);
3783 	cmd->vdev_id = cpu_to_le32(vdev_id);
3784 	cmd->interval = cpu_to_le32(interval);
3785 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3786 
3787 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3788 	if (ret) {
3789 		ath12k_warn(ar->ab,
3790 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3791 			    vdev_id);
3792 		dev_kfree_skb(skb);
3793 	}
3794 	return ret;
3795 }
3796 
3797 static void
3798 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3799 			      struct ath12k_wmi_pdev_band_arg *arg)
3800 {
3801 	u8 i;
3802 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3803 	struct ath12k_pdev *pdev;
3804 
3805 	for (i = 0; i < soc->num_radios; i++) {
3806 		pdev = &soc->pdevs[i];
3807 		hal_reg_cap = &soc->hal_reg_cap[i];
3808 		arg[i].pdev_id = pdev->pdev_id;
3809 
3810 		switch (pdev->cap.supported_bands) {
3811 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
3812 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3813 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3814 			break;
3815 		case WMI_HOST_WLAN_2GHZ_CAP:
3816 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3817 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3818 			break;
3819 		case WMI_HOST_WLAN_5GHZ_CAP:
3820 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3821 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3822 			break;
3823 		default:
3824 			break;
3825 		}
3826 	}
3827 }
3828 
3829 static void
3830 ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
3831 				struct ath12k_wmi_resource_config_params *wmi_cfg,
3832 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3833 {
3834 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3835 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3836 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3837 	wmi_cfg->num_offload_reorder_buffs =
3838 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3839 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3840 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3841 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3842 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3843 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3844 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3845 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3846 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3847 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3848 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3849 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3850 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3851 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3852 	wmi_cfg->roam_offload_max_ap_profiles =
3853 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3854 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3855 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3856 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3857 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3858 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3859 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3860 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3861 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3862 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3863 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3864 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3865 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3866 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3867 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3868 	wmi_cfg->num_tdls_conn_table_entries =
3869 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3870 	wmi_cfg->beacon_tx_offload_max_vdev =
3871 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3872 	wmi_cfg->num_multicast_filter_entries =
3873 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3874 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3875 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3876 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3877 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3878 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3879 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3880 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3881 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3882 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3883 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3884 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3885 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3886 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3887 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3888 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3889 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
3890 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3891 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3892 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3893 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3894 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3895 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3896 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3897 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3898 	if (ab->hw_params->reoq_lut_support)
3899 		wmi_cfg->host_service_flags |=
3900 			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
3901 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3902 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3903 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3904 }
3905 
3906 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3907 				struct ath12k_wmi_init_cmd_arg *arg)
3908 {
3909 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3910 	struct sk_buff *skb;
3911 	struct wmi_init_cmd *cmd;
3912 	struct ath12k_wmi_resource_config_params *cfg;
3913 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3914 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3915 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3916 	struct wmi_tlv *tlv;
3917 	int ret, len;
3918 	void *ptr;
3919 	u32 hw_mode_len = 0;
3920 	u16 idx;
3921 
3922 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3923 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3924 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3925 
3926 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3927 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3928 
3929 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3930 	if (!skb)
3931 		return -ENOMEM;
3932 
3933 	cmd = (struct wmi_init_cmd *)skb->data;
3934 
3935 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3936 						 sizeof(*cmd));
3937 
3938 	ptr = skb->data + sizeof(*cmd);
3939 	cfg = ptr;
3940 
3941 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
3942 
3943 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3944 						 sizeof(*cfg));
3945 
3946 	ptr += sizeof(*cfg);
3947 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3948 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3949 
3950 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3951 		host_mem_chunks[idx].tlv_header =
3952 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3953 					   len);
3954 
3955 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3956 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3957 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3958 
3959 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3960 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3961 			   arg->mem_chunks[idx].req_id,
3962 			   (u64)arg->mem_chunks[idx].paddr,
3963 			   arg->mem_chunks[idx].len);
3964 	}
3965 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3966 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3967 
3968 	/* TLV header for the host mem chunk array; len is 0 when num_mem_chunks is 0 */
3969 	tlv = ptr;
3970 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3971 	ptr += TLV_HDR_SIZE + len;
3972 
3973 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3974 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3975 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3976 							     sizeof(*hw_mode));
3977 
3978 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3979 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3980 
3981 		ptr += sizeof(*hw_mode);
3982 
3983 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3984 		tlv = ptr;
3985 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3986 
3987 		ptr += TLV_HDR_SIZE;
3988 		len = sizeof(*band_to_mac);
3989 
3990 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3991 			band_to_mac = (void *)ptr;
3992 
3993 			band_to_mac->tlv_header =
3994 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3995 						       len);
3996 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3997 			band_to_mac->start_freq =
3998 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3999 			band_to_mac->end_freq =
4000 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
4001 			ptr += sizeof(*band_to_mac);
4002 		}
4003 	}
4004 
4005 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
4006 	if (ret) {
4007 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
4008 		dev_kfree_skb(skb);
4009 	}
4010 
4011 	return ret;
4012 }
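
/* TLV order assembled above for WMI_INIT_CMDID (a sketch; the hw-mode
 * section is present only when arg->hw_mode_id != WMI_HOST_HW_MODE_MAX):
 *
 *	[wmi_init_cmd]
 *	[ath12k_wmi_resource_config_params]
 *	[WMI_TAG_ARRAY_STRUCT hdr][host mem chunks x num_mem_chunks]
 *	[ath12k_wmi_pdev_set_hw_mode_cmd]
 *	[WMI_TAG_ARRAY_STRUCT hdr][band-to-mac params x num_band_to_mac]
 */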
4013 
4014 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
4015 			    int pdev_id)
4016 {
4017 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
4018 	struct sk_buff *skb;
4019 	int ret;
4020 
4021 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4022 	if (!skb)
4023 		return -ENOMEM;
4024 
4025 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
4026 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
4027 						 sizeof(*cmd));
4028 
4029 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
4030 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
4031 
4032 	cmd->pdev_id = cpu_to_le32(pdev_id);
4033 
4034 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4035 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4036 
4037 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4038 	if (ret) {
4039 		ath12k_warn(ar->ab,
4040 			    "failed to send lro cfg req wmi cmd\n");
4041 		goto err;
4042 	}
4043 
4044 	return 0;
4045 err:
4046 	dev_kfree_skb(skb);
4047 	return ret;
4048 }
4049 
4050 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4051 {
4052 	unsigned long time_left;
4053 
4054 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4055 						WMI_SERVICE_READY_TIMEOUT_HZ);
4056 	if (!time_left)
4057 		return -ETIMEDOUT;
4058 
4059 	return 0;
4060 }
4061 
4062 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4063 {
4064 	unsigned long time_left;
4065 
4066 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4067 						WMI_SERVICE_READY_TIMEOUT_HZ);
4068 	if (!time_left)
4069 		return -ETIMEDOUT;
4070 
4071 	return 0;
4072 }
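
/* Both waiters pair with completions fired from the corresponding WMI
 * events. A hypothetical bring-up ordering (error handling elided):
 *
 *	ret = ath12k_wmi_wait_for_service_ready(ab);
 *	ret = ath12k_wmi_cmd_init(ab);
 *	ret = ath12k_wmi_wait_for_unified_ready(ab);
 */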
4073 
4074 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4075 			   enum wmi_host_hw_mode_config_type mode)
4076 {
4077 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4078 	struct sk_buff *skb;
4079 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4080 	int len;
4081 	int ret;
4082 
4083 	len = sizeof(*cmd);
4084 
4085 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4086 	if (!skb)
4087 		return -ENOMEM;
4088 
4089 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4090 
4091 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4092 						 sizeof(*cmd));
4093 
4094 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
4095 	cmd->hw_mode_index = cpu_to_le32(mode);
4096 
4097 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4098 	if (ret) {
4099 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4100 		dev_kfree_skb(skb);
4101 	}
4102 
4103 	return ret;
4104 }
4105 
4106 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
4107 {
4108 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4109 	struct ath12k_wmi_init_cmd_arg arg = {};
4110 
4111 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
4112 		     ab->wmi_ab.svc_map))
4113 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
4114 
4115 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
4116 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
4117 
4118 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
4119 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
4120 	arg.mem_chunks = wmi_ab->mem_chunks;
4121 
4122 	if (ab->hw_params->single_pdev_only)
4123 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4124 
4125 	arg.num_band_to_mac = ab->num_radios;
4126 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
4127 
4128 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
4129 
4130 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
4131 }
4132 
4133 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
4134 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
4135 {
4136 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
4137 	struct sk_buff *skb;
4138 	int ret;
4139 
4140 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4141 	if (!skb)
4142 		return -ENOMEM;
4143 
4144 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
4145 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
4146 						 sizeof(*cmd));
4147 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
4148 	cmd->scan_count = cpu_to_le32(arg->scan_count);
4149 	cmd->scan_period = cpu_to_le32(arg->scan_period);
4150 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
4151 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
4152 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
4153 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
4154 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
4155 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
4156 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
4157 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
4158 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
4159 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
4160 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
4161 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
4162 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
4163 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
4164 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
4165 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4166 
4167 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4168 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4169 		   arg->vdev_id);
4170 
4171 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4172 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4173 	if (ret) {
4174 		ath12k_warn(ar->ab,
4175 			    "failed to send spectral scan config wmi cmd\n");
4176 		goto err;
4177 	}
4178 
4179 	return 0;
4180 err:
4181 	dev_kfree_skb(skb);
4182 	return ret;
4183 }
4184 
4185 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4186 				    u32 trigger, u32 enable)
4187 {
4188 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4189 	struct sk_buff *skb;
4190 	int ret;
4191 
4192 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4193 	if (!skb)
4194 		return -ENOMEM;
4195 
4196 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4197 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4198 						 sizeof(*cmd));
4199 
4200 	cmd->vdev_id = cpu_to_le32(vdev_id);
4201 	cmd->trigger_cmd = cpu_to_le32(trigger);
4202 	cmd->enable_cmd = cpu_to_le32(enable);
4203 
4204 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4205 		   "WMI spectral enable cmd vdev id 0x%x\n",
4206 		   vdev_id);
4207 
4208 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4209 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4210 	if (ret) {
4211 		ath12k_warn(ar->ab,
4212 			    "failed to send spectral enable wmi cmd\n");
4213 		goto err;
4214 	}
4215 
4216 	return 0;
4217 err:
4218 	dev_kfree_skb(skb);
4219 	return ret;
4220 }
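
/* A minimal (hypothetical) spectral scan start: configure the vdev,
 * then enable scanning; the trigger/enable values shown are
 * placeholders for the firmware's trigger and enable codes:
 *
 *	ret = ath12k_wmi_vdev_spectral_conf(ar, &conf_arg);
 *	if (!ret)
 *		ret = ath12k_wmi_vdev_spectral_enable(ar, vdev_id, 1, 1);
 */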
4221 
4222 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4223 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4224 {
4225 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4226 	struct sk_buff *skb;
4227 	int ret;
4228 
4229 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4230 	if (!skb)
4231 		return -ENOMEM;
4232 
4233 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4234 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4235 						 sizeof(*cmd));
4236 
4237 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4238 	cmd->module_id = cpu_to_le32(arg->module_id);
4239 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4240 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4241 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4242 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4243 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4244 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4245 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4246 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4247 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4248 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4249 
4250 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4251 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4252 		   arg->pdev_id);
4253 
4254 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4255 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4256 	if (ret) {
4257 		ath12k_warn(ar->ab,
4258 			    "failed to send dma ring cfg req wmi cmd\n");
4259 		goto err;
4260 	}
4261 
4262 	return 0;
4263 err:
4264 	dev_kfree_skb(skb);
4265 	return ret;
4266 }
4267 
4268 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4269 					  u16 tag, u16 len,
4270 					  const void *ptr, void *data)
4271 {
4272 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4273 
4274 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4275 		return -EPROTO;
4276 
4277 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4278 		return -ENOBUFS;
4279 
4280 	arg->num_buf_entry++;
4281 	return 0;
4282 }
4283 
4284 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4285 					 u16 tag, u16 len,
4286 					 const void *ptr, void *data)
4287 {
4288 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4289 
4290 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4291 		return -EPROTO;
4292 
4293 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4294 		return -ENOBUFS;
4295 
4296 	arg->num_meta++;
4297 
4298 	return 0;
4299 }
4300 
4301 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4302 				    u16 tag, u16 len,
4303 				    const void *ptr, void *data)
4304 {
4305 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4306 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4307 	u32 pdev_id;
4308 	int ret;
4309 
4310 	switch (tag) {
4311 	case WMI_TAG_DMA_BUF_RELEASE:
4312 		fixed = ptr;
4313 		arg->fixed = *fixed;
4314 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4315 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4316 		break;
4317 	case WMI_TAG_ARRAY_STRUCT:
4318 		if (!arg->buf_entry_done) {
4319 			arg->num_buf_entry = 0;
4320 			arg->buf_entry = ptr;
4321 
4322 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4323 						  ath12k_wmi_dma_buf_entry_parse,
4324 						  arg);
4325 			if (ret) {
4326 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4327 					    ret);
4328 				return ret;
4329 			}
4330 
4331 			arg->buf_entry_done = true;
4332 		} else if (!arg->meta_data_done) {
4333 			arg->num_meta = 0;
4334 			arg->meta_data = ptr;
4335 
4336 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4337 						  ath12k_wmi_dma_buf_meta_parse,
4338 						  arg);
4339 			if (ret) {
4340 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4341 					    ret);
4342 				return ret;
4343 			}
4344 
4345 			arg->meta_data_done = true;
4346 		}
4347 		break;
4348 	default:
4349 		break;
4350 	}
4351 	return 0;
4352 }
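
/* TLV stream expected by the parser above: fixed params first, then
 * two WMI_TAG_ARRAY_STRUCT TLVs, buffer entries before the (optional)
 * spectral metadata:
 *
 *	[WMI_TAG_DMA_BUF_RELEASE fixed params]
 *	[WMI_TAG_ARRAY_STRUCT][WMI_TAG_DMA_BUF_RELEASE_ENTRY x N]
 *	[WMI_TAG_ARRAY_STRUCT][WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA x M]
 */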
4353 
4354 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4355 						       struct sk_buff *skb)
4356 {
4357 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4358 	struct ath12k_dbring_buf_release_event param;
4359 	int ret;
4360 
4361 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4362 				  ath12k_wmi_dma_buf_parse,
4363 				  &arg);
4364 	if (ret) {
4365 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4366 		return;
4367 	}
4368 
4369 	param.fixed = arg.fixed;
4370 	param.buf_entry = arg.buf_entry;
4371 	param.num_buf_entry = arg.num_buf_entry;
4372 	param.meta_data = arg.meta_data;
4373 	param.num_meta = arg.num_meta;
4374 
4375 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4376 	if (ret) {
4377 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4378 		return;
4379 	}
4380 }
4381 
4382 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4383 					 u16 tag, u16 len,
4384 					 const void *ptr, void *data)
4385 {
4386 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4387 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4388 	u32 phy_map = 0;
4389 
4390 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4391 		return -EPROTO;
4392 
4393 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4394 		return -ENOBUFS;
4395 
4396 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4397 				   hw_mode_id);
4398 	svc_rdy_ext->n_hw_mode_caps++;
4399 
4400 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4401 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4402 
4403 	return 0;
4404 }
4405 
4406 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4407 				   u16 len, const void *ptr, void *data)
4408 {
4409 	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
4410 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4411 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4412 	enum wmi_host_hw_mode_config_type mode, pref;
4413 	u32 i;
4414 	int ret;
4415 
4416 	svc_rdy_ext->n_hw_mode_caps = 0;
4417 	svc_rdy_ext->hw_mode_caps = ptr;
4418 
4419 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4420 				  ath12k_wmi_hw_mode_caps_parse,
4421 				  svc_rdy_ext);
4422 	if (ret) {
4423 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4424 		return ret;
4425 	}
4426 
4427 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4428 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4429 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4430 
4431 		if (mode >= WMI_HOST_HW_MODE_MAX)
4432 			continue;
4433 
4434 		pref = soc->wmi_ab.preferred_hw_mode;
4435 
4436 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4437 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4438 			soc->wmi_ab.preferred_hw_mode = mode;
4439 		}
4440 	}
4441 
4442 	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
4443 
4444 	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
4445 		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
4446 
4447 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4448 		return -EINVAL;
4449 
4450 	return 0;
4451 }
4452 
4453 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4454 					 u16 tag, u16 len,
4455 					 const void *ptr, void *data)
4456 {
4457 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4458 
4459 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4460 		return -EPROTO;
4461 
4462 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4463 		return -ENOBUFS;
4464 
4465 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4466 	if (!svc_rdy_ext->n_mac_phy_caps) {
4467 		svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
4468 						    sizeof(*svc_rdy_ext->mac_phy_caps), GFP_ATOMIC);
4469 		if (!svc_rdy_ext->mac_phy_caps)
4470 			return -ENOMEM;
4471 	}
4472 
4473 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4474 	svc_rdy_ext->n_mac_phy_caps++;
4475 	return 0;
4476 }
4477 
4478 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4479 					     u16 tag, u16 len,
4480 					     const void *ptr, void *data)
4481 {
4482 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4483 
4484 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4485 		return -EPROTO;
4486 
4487 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4488 		return -ENOBUFS;
4489 
4490 	svc_rdy_ext->n_ext_hal_reg_caps++;
4491 	return 0;
4492 }
4493 
4494 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4495 				       u16 len, const void *ptr, void *data)
4496 {
4497 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4498 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4499 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4500 	int ret;
4501 	u32 i;
4502 
4503 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4504 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4505 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4506 				  ath12k_wmi_ext_hal_reg_caps_parse,
4507 				  svc_rdy_ext);
4508 	if (ret) {
4509 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4510 		return ret;
4511 	}
4512 
4513 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4514 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4515 						      svc_rdy_ext->soc_hal_reg_caps,
4516 						      svc_rdy_ext->ext_hal_reg_caps, i,
4517 						      &reg_cap);
4518 		if (ret) {
4519 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4520 			return ret;
4521 		}
4522 
4523 		if (reg_cap.phy_id >= MAX_RADIOS) {
4524 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4525 			return -EINVAL;
4526 		}
4527 
4528 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4529 	}
4530 	return 0;
4531 }
4532 
4533 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4534 						 u16 len, const void *ptr,
4535 						 void *data)
4536 {
4537 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4538 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4539 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4540 	u32 phy_id_map;
4541 	int pdev_index = 0;
4542 	int ret;
4543 
4544 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4545 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4546 
4547 	soc->num_radios = 0;
4548 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4549 	soc->fw_pdev_count = 0;
4550 
4551 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4552 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4553 							    svc_rdy_ext,
4554 							    hw_mode_id, soc->num_radios,
4555 							    &soc->pdevs[pdev_index]);
4556 		if (ret) {
4557 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4558 				    soc->num_radios);
4559 			return ret;
4560 		}
4561 
4562 		soc->num_radios++;
4563 
4564 		/* For single_pdev_only targets,
4565 		 * save mac_phy capability in the same pdev
4566 		 */
4567 		if (soc->hw_params->single_pdev_only)
4568 			pdev_index = 0;
4569 		else
4570 			pdev_index = soc->num_radios;
4571 
4572 		/* TODO: mac_phy_cap prints */
4573 		phy_id_map >>= 1;
4574 	}
4575 
4576 	if (soc->hw_params->single_pdev_only) {
4577 		soc->num_radios = 1;
4578 		soc->pdevs[0].pdev_id = 0;
4579 	}
4580 
4581 	return 0;
4582 }
4583 
4584 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4585 					  u16 tag, u16 len,
4586 					  const void *ptr, void *data)
4587 {
4588 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4589 
4590 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4591 		return -EPROTO;
4592 
4593 	parse->n_dma_ring_caps++;
4594 	return 0;
4595 }
4596 
4597 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4598 					u32 num_cap)
4599 {
4600 	size_t sz;
4601 	void *ptr;
4602 
4603 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4604 	ptr = kzalloc(sz, GFP_ATOMIC);
4605 	if (!ptr)
4606 		return -ENOMEM;
4607 
4608 	ab->db_caps = ptr;
4609 	ab->num_db_cap = num_cap;
4610 
4611 	return 0;
4612 }
4613 
4614 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4615 {
4616 	kfree(ab->db_caps);
4617 	ab->db_caps = NULL;
4618 	ab->num_db_cap = 0;
4619 }
4620 
4621 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4622 				    u16 len, const void *ptr, void *data)
4623 {
4624 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4625 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4626 	struct ath12k_dbring_cap *dir_buff_caps;
4627 	int ret;
4628 	u32 i;
4629 
4630 	dma_caps_parse->n_dma_ring_caps = 0;
4631 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4632 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4633 				  ath12k_wmi_dma_ring_caps_parse,
4634 				  dma_caps_parse);
4635 	if (ret) {
4636 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4637 		return ret;
4638 	}
4639 
4640 	if (!dma_caps_parse->n_dma_ring_caps)
4641 		return 0;
4642 
4643 	if (ab->num_db_cap) {
4644 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4645 		return 0;
4646 	}
4647 
4648 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4649 	if (ret)
4650 		return ret;
4651 
4652 	dir_buff_caps = ab->db_caps;
4653 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4654 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4655 			ath12k_warn(ab, "Invalid module id %d\n",
4656 				    le32_to_cpu(dma_caps[i].module_id));
4657 			ret = -EINVAL;
4658 			goto free_dir_buff;
4659 		}
4660 
4661 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4662 		dir_buff_caps[i].pdev_id =
4663 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4664 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4665 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4666 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4667 	}
4668 
4669 	return 0;
4670 
4671 free_dir_buff:
4672 	ath12k_wmi_free_dbring_caps(ab);
4673 	return ret;
4674 }
4675 
4676 static void
4677 ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
4678 			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
4679 			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
4680 {
4681 	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
4682 	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
4683 	mac_phy_info->hw_freq_range.low_2ghz_freq =
4684 					__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
4685 	mac_phy_info->hw_freq_range.high_2ghz_freq =
4686 					__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
4687 	mac_phy_info->hw_freq_range.low_5ghz_freq =
4688 					__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
4689 	mac_phy_info->hw_freq_range.high_5ghz_freq =
4690 					__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
4691 }
4692 
4693 static void
4694 ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
4695 				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
4696 {
4697 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
4698 	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
4699 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4700 	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
4701 	u32 hw_mode_id, phy_bit_map;
4702 	u8 hw_idx;
4703 
4704 	mac_phy_info = &svc_ext_info->mac_phy_info[0];
4705 	mac_phy_cap = svc_rdy_ext->mac_phy_caps;
4706 
4707 	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
4708 		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
4709 		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
4710 		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
4711 
4712 		while (phy_bit_map) {
4713 			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
4714 			mac_phy_info->hw_mode_config_type =
4715 					le32_get_bits(hw_mode_cap->hw_mode_config_type,
4716 						      WMI_HW_MODE_CAP_CFG_TYPE);
4717 			ath12k_dbg(ab, ATH12K_DBG_WMI,
4718 				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
4719 				   hw_idx, hw_mode_id,
4720 				   mac_phy_info->hw_mode_config_type,
4721 				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
4722 				   mac_phy_info->hw_freq_range.low_2ghz_freq,
4723 				   mac_phy_info->hw_freq_range.high_2ghz_freq,
4724 				   mac_phy_info->hw_freq_range.low_5ghz_freq,
4725 				   mac_phy_info->hw_freq_range.high_5ghz_freq);
4726 
4727 			mac_phy_cap++;
4728 			mac_phy_info++;
4729 
4730 			phy_bit_map >>= 1;
4731 		}
4732 	}
4733 }
4734 
4735 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4736 					u16 tag, u16 len,
4737 					const void *ptr, void *data)
4738 {
4739 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4740 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4741 	int ret;
4742 
4743 	switch (tag) {
4744 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4745 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4746 						&svc_rdy_ext->arg);
4747 		if (ret) {
4748 			ath12k_warn(ab, "unable to extract ext params\n");
4749 			return ret;
4750 		}
4751 		break;
4752 
4753 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4754 		svc_rdy_ext->hw_caps = ptr;
4755 		svc_rdy_ext->arg.num_hw_modes =
4756 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4757 		break;
4758 
4759 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4760 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4761 							    svc_rdy_ext);
4762 		if (ret)
4763 			return ret;
4764 		break;
4765 
4766 	case WMI_TAG_ARRAY_STRUCT:
4767 		if (!svc_rdy_ext->hw_mode_done) {
4768 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4769 			if (ret)
4770 				return ret;
4771 
4772 			svc_rdy_ext->hw_mode_done = true;
4773 		} else if (!svc_rdy_ext->mac_phy_done) {
4774 			svc_rdy_ext->n_mac_phy_caps = 0;
4775 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4776 						  ath12k_wmi_mac_phy_caps_parse,
4777 						  svc_rdy_ext);
4778 			if (ret) {
4779 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4780 				return ret;
4781 			}
4782 
4783 			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
4784 
4785 			svc_rdy_ext->mac_phy_done = true;
4786 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4787 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4788 			if (ret)
4789 				return ret;
4790 
4791 			svc_rdy_ext->ext_hal_reg_done = true;
4792 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4793 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4794 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4795 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4796 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4797 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4798 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4799 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4800 						       &svc_rdy_ext->dma_caps_parse);
4801 			if (ret)
4802 				return ret;
4803 
4804 			svc_rdy_ext->dma_ring_cap_done = true;
4805 		}
4806 		break;
4807 
4808 	default:
4809 		break;
4810 	}
4811 	return 0;
4812 }
4813 
4814 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4815 					  struct sk_buff *skb)
4816 {
4817 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4818 	int ret;
4819 
4820 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4821 				  ath12k_wmi_svc_rdy_ext_parse,
4822 				  &svc_rdy_ext);
4823 	if (ret) {
4824 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4825 		goto err;
4826 	}
4827 
4828 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4829 		complete(&ab->wmi_ab.service_ready);
4830 
4831 	kfree(svc_rdy_ext.mac_phy_caps);
4832 	return 0;
4833 
4834 err:
4835 	kfree(svc_rdy_ext.mac_phy_caps);
4836 	ath12k_wmi_free_dbring_caps(ab);
4837 	return ret;
4838 }
4839 
4840 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4841 				      const void *ptr,
4842 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4843 {
4844 	const struct wmi_service_ready_ext2_event *ev = ptr;
4845 
4846 	if (!ev)
4847 		return -EINVAL;
4848 
4849 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4850 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4851 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4852 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4853 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4854 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4855 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4856 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4857 	return 0;
4858 }
4859 
4860 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4861 				      const __le32 cap_mac_info[],
4862 				      const __le32 cap_phy_info[],
4863 				      const __le32 supp_mcs[],
4864 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4865 				       __le32 cap_info_internal)
4866 {
4867 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4868 	u32 support_320mhz;
4869 	u8 i;
4870 
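	/* The 6 GHz entry is populated from the 5 GHz EHT caps (see the
	 * caller) and may already carry the 320 MHz flag from an earlier
	 * update; save it so the copy below does not wipe it out.
	 */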
4871 	if (band == NL80211_BAND_6GHZ)
4872 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4873 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4874 
4875 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4876 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4877 
4878 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4879 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4880 
4881 	if (band == NL80211_BAND_6GHZ)
4882 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4883 
4884 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4885 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4886 	if (band != NL80211_BAND_2GHZ) {
4887 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4888 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4889 	}
4890 
4891 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4892 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4893 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4894 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4895 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4896 
4897 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4898 }
4899 
4900 static int
4901 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4902 				      const struct ath12k_wmi_caps_ext_params *caps,
4903 				      struct ath12k_pdev *pdev)
4904 {
4905 	struct ath12k_band_cap *cap_band;
4906 	u32 bands, support_320mhz;
4907 	int i;
4908 
4909 	if (ab->hw_params->single_pdev_only) {
4910 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4911 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4912 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4913 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4914 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4915 			return 0;
4916 		}
4917 
4918 		for (i = 0; i < ab->fw_pdev_count; i++) {
4919 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4920 
4921 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4922 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4923 				bands = fw_pdev->supported_bands;
4924 				break;
4925 			}
4926 		}
4927 
4928 		if (i == ab->fw_pdev_count)
4929 			return -EINVAL;
4930 	} else {
4931 		bands = pdev->cap.supported_bands;
4932 	}
4933 
4934 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
4935 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4936 					  caps->eht_cap_mac_info_2ghz,
4937 					  caps->eht_cap_phy_info_2ghz,
4938 					  caps->eht_supp_mcs_ext_2ghz,
4939 					  &caps->eht_ppet_2ghz,
4940 					  caps->eht_cap_info_internal);
4941 	}
4942 
4943 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
4944 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4945 					  caps->eht_cap_mac_info_5ghz,
4946 					  caps->eht_cap_phy_info_5ghz,
4947 					  caps->eht_supp_mcs_ext_5ghz,
4948 					  &caps->eht_ppet_5ghz,
4949 					  caps->eht_cap_info_internal);
4950 
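		/* There is no separate 6 GHz capability array in this TLV,
		 * so reuse the 5 GHz EHT caps for the 6 GHz band;
		 * ath12k_wmi_eht_caps_parse() preserves the previously set
		 * 320 MHz flag for 6 GHz.
		 */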
4951 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4952 					  caps->eht_cap_mac_info_5ghz,
4953 					  caps->eht_cap_phy_info_5ghz,
4954 					  caps->eht_supp_mcs_ext_5ghz,
4955 					  &caps->eht_ppet_5ghz,
4956 					  caps->eht_cap_info_internal);
4957 	}
4958 
4959 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4960 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4961 
4962 	return 0;
4963 }
4964 
4965 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4966 					   u16 len, const void *ptr,
4967 					   void *data)
4968 {
4969 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4970 	int i = 0, ret;
4971 
4972 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4973 		return -EPROTO;
4974 
4975 	if (ab->hw_params->single_pdev_only) {
4976 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4977 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4978 			return 0;
4979 	} else {
4980 		for (i = 0; i < ab->num_radios; i++) {
4981 			if (ab->pdevs[i].pdev_id ==
4982 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4983 				break;
4984 		}
4985 
4986 		if (i == ab->num_radios)
4987 			return -EINVAL;
4988 	}
4989 
4990 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4991 	if (ret) {
4992 		ath12k_warn(ab,
4993 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4994 			    ab->pdevs[i].pdev_id, ret);
4995 		return ret;
4996 	}
4997 
4998 	return 0;
4999 }
5000 
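/* Record the frequency range reported by firmware for this mac in the
 * given hw mode, clamped to the driver's limits. A zero high bound from
 * firmware is treated as "no limit": e.g. a reported 2 GHz range of
 * [0, 0] with the 2 GHz cap set becomes
 * [ATH12K_MIN_2GHZ_FREQ, ATH12K_MAX_2GHZ_FREQ]. The 5 GHz range extends
 * through 6 GHz, hence the ATH12K_MAX_6GHZ_FREQ upper bound.
 */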
5001 static void
5002 ath12k_wmi_update_freq_info(struct ath12k_base *ab,
5003 			    struct ath12k_svc_ext_mac_phy_info *mac_cap,
5004 			    enum ath12k_hw_mode mode,
5005 			    u32 phy_id)
5006 {
5007 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5008 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5009 
5010 	mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
5011 
5012 	if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
5013 		mac_range->low_2ghz_freq = max_t(u32,
5014 						 mac_cap->hw_freq_range.low_2ghz_freq,
5015 						 ATH12K_MIN_2GHZ_FREQ);
5016 		mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
5017 					    min_t(u32,
5018 						  mac_cap->hw_freq_range.high_2ghz_freq,
5019 						  ATH12K_MAX_2GHZ_FREQ) :
5020 					    ATH12K_MAX_2GHZ_FREQ;
5021 	}
5022 
5023 	if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
5024 		mac_range->low_5ghz_freq = max_t(u32,
5025 						 mac_cap->hw_freq_range.low_5ghz_freq,
5026 						 ATH12K_MIN_5GHZ_FREQ);
5027 		mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
5028 					    min_t(u32,
5029 						  mac_cap->hw_freq_range.high_5ghz_freq,
5030 						  ATH12K_MAX_6GHZ_FREQ) :
5031 					    ATH12K_MAX_6GHZ_FREQ;
5032 	}
5033 }
5034 
5035 static bool
5036 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
5037 				 enum ath12k_hw_mode hwmode)
5038 {
5039 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5040 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5041 	u8 phy_id;
5042 
5043 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5044 		mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
5045 		/* modify SBS/DBS range only when both PHYs for DBS are filled */
5046 		if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
5047 			return false;
5048 	}
5049 
5050 	return true;
5051 }
5052 
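/* For DBS, a mac whose range covers both 2 GHz and 5 GHz is the shared
 * mac; presumably only its 2 GHz part is relevant in this mode, so its
 * 5 GHz part is cleared, leaving one 2 GHz mac and one 5 GHz mac in the
 * DBS table.
 */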
5053 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
5054 {
5055 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5056 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5057 	u8 phy_id;
5058 
5059 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
5060 	/* Reset 5 GHz range for shared mac for DBS */
5061 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5062 		if (mac_range[phy_id].low_2ghz_freq &&
5063 		    mac_range[phy_id].low_5ghz_freq) {
5064 			mac_range[phy_id].low_5ghz_freq = 0;
5065 			mac_range[phy_id].high_5ghz_freq = 0;
5066 		}
5067 	}
5068 }
5069 
5070 static u32
5071 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5072 {
5073 	u32 highest_freq = 0;
5074 	u8 phy_id;
5075 
5076 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5077 		if (range[phy_id].high_5ghz_freq > highest_freq)
5078 			highest_freq = range[phy_id].high_5ghz_freq;
5079 	}
5080 
5081 	return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
5082 }
5083 
5084 static u32
5085 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5086 {
5087 	u32 lowest_freq = 0;
5088 	u8 phy_id;
5089 
5090 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5091 		if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
5092 		    (range[phy_id].low_5ghz_freq &&
		     range[phy_id].low_5ghz_freq < lowest_freq))
5093 			lowest_freq = range[phy_id].low_5ghz_freq;
5094 	}
5095 
5096 	return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
5097 }
5098 
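/* Build the SBS "upper share" table from the plain SBS ranges: the mac
 * that also owns 2 GHz takes the 5 GHz spectrum above the separator, the
 * other mac takes everything below it, with the +10 leaving a 10 MHz gap
 * above the separator. As an illustrative example (not a firmware-derived
 * value), sbs_range_sep = 5330 would give the shared mac [5340, highest]
 * and the non-shared mac [lowest, 5330].
 */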
5099 static void
5100 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
5101 				     u16 sbs_range_sep,
5102 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5103 {
5104 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5105 	struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
5106 	u8 phy_id;
5107 
5108 	upper_sbs_freq_range =
5109 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
5110 
5111 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5112 		upper_sbs_freq_range[phy_id].low_2ghz_freq =
5113 						ref_freq[phy_id].low_2ghz_freq;
5114 		upper_sbs_freq_range[phy_id].high_2ghz_freq =
5115 						ref_freq[phy_id].high_2ghz_freq;
5116 
5117 		/* update for shared mac */
5118 		if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
5119 			upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5120 			upper_sbs_freq_range[phy_id].high_5ghz_freq =
5121 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5122 		} else {
5123 			upper_sbs_freq_range[phy_id].low_5ghz_freq =
5124 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5125 			upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5126 		}
5127 	}
5128 }
5129 
5130 static void
5131 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
5132 				     u16 sbs_range_sep,
5133 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5134 {
5135 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5136 	struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
5137 	u8 phy_id;
5138 
5139 	lower_sbs_freq_range =
5140 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
5141 
5142 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5143 		lower_sbs_freq_range[phy_id].low_2ghz_freq =
5144 						ref_freq[phy_id].low_2ghz_freq;
5145 		lower_sbs_freq_range[phy_id].high_2ghz_freq =
5146 						ref_freq[phy_id].high_2ghz_freq;
5147 
5148 		/* update for shared mac */
5149 		if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
5150 			lower_sbs_freq_range[phy_id].low_5ghz_freq =
5151 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5152 			lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5153 		} else {
5154 			lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5155 			lower_sbs_freq_range[phy_id].high_5ghz_freq =
5156 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5157 		}
5158 	}
5159 }
5160 
5161 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
5162 {
5163 	static const char * const mode_str[] = {
5164 		[ATH12K_HW_MODE_SMM] = "SMM",
5165 		[ATH12K_HW_MODE_DBS] = "DBS",
5166 		[ATH12K_HW_MODE_SBS] = "SBS",
5167 		[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
5168 		[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
5169 	};
5170 
5171 	if (hw_mode >= ARRAY_SIZE(mode_str))
5172 		return "Unknown";
5173 
5174 	return mode_str[hw_mode];
5175 }
5176 
5177 static void
5178 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
5179 				   struct ath12k_hw_mode_freq_range_arg *freq_range,
5180 				   enum ath12k_hw_mode hw_mode)
5181 {
5182 	u8 i;
5183 
5184 	for (i = 0; i < MAX_RADIOS; i++)
5185 		if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
5186 			ath12k_dbg(ab, ATH12K_DBG_WMI,
5187 				   "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5188 				   ath12k_wmi_hw_mode_to_str(hw_mode),
5189 				   hw_mode, i,
5190 				   freq_range[i].low_2ghz_freq,
5191 				   freq_range[i].high_2ghz_freq,
5192 				   freq_range[i].low_5ghz_freq,
5193 				   freq_range[i].high_5ghz_freq);
5194 }
5195 
5196 static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
5197 {
5198 	struct ath12k_hw_mode_freq_range_arg *freq_range;
5199 	u8 i;
5200 
5201 	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
5202 		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
5203 		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
5204 	}
5205 }
5206 
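/* When firmware reports plain SBS ranges without an explicit separator,
 * one mac is shared (2 GHz plus part of 5 GHz) and the other is 5 GHz
 * only. Trim the 5 GHz-only mac so its range does not overlap the shared
 * mac's, keeping a 10 MHz gap between the two ranges.
 */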
5207 static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
5208 {
5209 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5210 	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
5211 	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
5212 	u8 shared_phy_id;
5213 
5214 	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
5215 
5216 	/* If the SBS mac range has both 2.4 and 5 GHz ranges, i.e. it is the
5217 	 * shared phy_id, keep the range as it is in SBS.
5218 	 */
5219 	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
5220 		return 0;
5221 
5222 	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
5223 		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4 GHz");
5224 		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
5225 		return -EINVAL;
5226 	}
5227 
5228 	non_shared_range = sbs_mac_range;
5229 	/* if SBS mac range has only 5 GHz then it's the non-shared phy, so
5230 	 * modify the range as per the shared mac.
5231 	 */
5232 	shared_phy_id = phy_id ? 0 : 1;
5233 	shared_mac_range =
5234 		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
5235 
5236 	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
5237 		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
5238 		/* If the shared mac lower 5 GHz frequency is greater than
5239 		 * non-shared mac lower 5 GHz frequency then the shared mac has
5240 		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
5241 		 * freq should be less than the shared mac's low 5 GHz freq.
5242 		 */
5243 		if (non_shared_range->high_5ghz_freq >=
5244 		    shared_mac_range->low_5ghz_freq)
5245 			non_shared_range->high_5ghz_freq =
5246 				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
5247 				      non_shared_range->low_5ghz_freq);
5248 	} else if (shared_mac_range->high_5ghz_freq <
5249 		   non_shared_range->high_5ghz_freq) {
5250 		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
5251 		/* If the shared mac high 5 GHz frequency is less than
5252 		 * non-shared mac high 5 GHz frequency then the shared mac has
5253 		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
5254 		 * freq should be greater than the shared mac's high 5 GHz freq.
5255 		 */
5256 		if (shared_mac_range->high_5ghz_freq >=
5257 		    non_shared_range->low_5ghz_freq)
5258 			non_shared_range->low_5ghz_freq =
5259 				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
5260 				      non_shared_range->high_5ghz_freq);
5261 	} else {
5262 		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
5263 		return -EINVAL;
5264 	}
5265 
5266 	return 0;
5267 }
5268 
5269 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
5270 {
5271 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5272 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5273 	u16 sbs_range_sep;
5274 	u8 phy_id;
5275 	int ret;
5276 
5277 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
5278 
5279 	/* If sbs_lower_band_end_freq has a value, then the frequency range
5280 	 * will be split using that value.
5281 	 */
5282 	sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
5283 	if (sbs_range_sep) {
5284 		ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
5285 						     mac_range);
5286 		ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
5287 						     mac_range);
5288 		/* Hardware specifies the range boundary with sbs_range_sep
5289 		 * (i.e. the boundary between 5 GHz high and 5 GHz low), so
5290 		 * reset the original range to make sure it will not get used.
5291 		 */
5292 		memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5293 		return;
5294 	}
5295 
5296 	/* If sbs_lower_band_end_freq is not set, the firmware will send one
5297 	 * shared mac range and one non-shared mac range, so update those ranges.
5298 	 */
5299 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5300 		ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
5301 		if (ret) {
5302 			memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5303 			break;
5304 		}
5305 	}
5306 }
5307 
5308 static void
5309 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
5310 				enum wmi_host_hw_mode_config_type hw_config_type,
5311 				u32 phy_id,
5312 				struct ath12k_svc_ext_mac_phy_info *mac_cap)
5313 {
5314 	if (phy_id >= MAX_RADIOS) {
5315 		ath12k_err(ab, "more than two macs are not supported: %d", phy_id);
5316 		return;
5317 	}
5318 
5319 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5320 		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5321 		   hw_config_type, phy_id, mac_cap->supported_bands,
5322 		   ab->wmi_ab.sbs_lower_band_end_freq,
5323 		   mac_cap->hw_freq_range.low_2ghz_freq,
5324 		   mac_cap->hw_freq_range.high_2ghz_freq,
5325 		   mac_cap->hw_freq_range.low_5ghz_freq,
5326 		   mac_cap->hw_freq_range.high_5ghz_freq);
5327 
5328 	switch (hw_config_type) {
5329 	case WMI_HOST_HW_MODE_SINGLE:
5330 		if (phy_id) {
5331 			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
5332 			break;
5333 		}
5334 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
5335 		break;
5336 
5337 	case WMI_HOST_HW_MODE_DBS:
5338 		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5339 			ath12k_wmi_update_freq_info(ab, mac_cap,
5340 						    ATH12K_HW_MODE_DBS, phy_id);
5341 		break;
5342 	case WMI_HOST_HW_MODE_DBS_SBS:
5343 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
5344 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
5345 		if (ab->wmi_ab.sbs_lower_band_end_freq ||
5346 		    mac_cap->hw_freq_range.low_5ghz_freq ||
5347 		    mac_cap->hw_freq_range.low_2ghz_freq)
5348 			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
5349 						    phy_id);
5350 
5351 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5352 			ath12k_wmi_update_dbs_freq_info(ab);
5353 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5354 			ath12k_wmi_update_sbs_freq_info(ab);
5355 		break;
5356 	case WMI_HOST_HW_MODE_SBS:
5357 	case WMI_HOST_HW_MODE_SBS_PASSIVE:
5358 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
5359 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5360 			ath12k_wmi_update_sbs_freq_info(ab);
5361 
5362 		break;
5363 	default:
5364 		break;
5365 	}
5366 }
5367 
5368 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
5369 {
5370 	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
5371 	    (ab->wmi_ab.sbs_lower_band_end_freq &&
5372 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
5373 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
5374 		return true;
5375 
5376 	return false;
5377 }
5378 
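/* Walk the per-hw-mode MAC PHY info from service ready ext and build the
 * per-mode frequency range table. Single-mac modes consume one
 * mac_phy_info entry, dual-mac modes (DBS/SBS variants) consume two,
 * which is why 'j' may advance twice per hw mode.
 */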
5379 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
5380 {
5381 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
5382 	struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
5383 	enum wmi_host_hw_mode_config_type hw_config_type;
5384 	struct ath12k_svc_ext_mac_phy_info *tmp;
5385 	bool dbs_mode = false, sbs_mode = false;
5386 	u32 i, j = 0;
5387 
5388 	if (!svc_ext_info->num_hw_modes) {
5389 		ath12k_err(ab, "invalid number of hw modes");
5390 		return -EINVAL;
5391 	}
5392 
5393 	ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
5394 		   svc_ext_info->num_hw_modes);
5395 
5396 	memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
5397 
5398 	for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
5399 		if (j >= ATH12K_MAX_MAC_PHY_CAP)
5400 			return -EINVAL;
5401 
5402 		/* Update for MAC0 */
5403 		tmp = &svc_ext_info->mac_phy_info[j++];
5404 		hw_config_type = tmp->hw_mode_config_type;
5405 		ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
5406 
5407 		/* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
5408 		if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5409 		    hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5410 		    hw_config_type == WMI_HOST_HW_MODE_SBS ||
5411 		    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
5412 			if (j >= ATH12K_MAX_MAC_PHY_CAP)
5413 				return -EINVAL;
5414 			/* Update for MAC1 */
5415 			tmp = &svc_ext_info->mac_phy_info[j++];
5416 			ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
5417 							tmp->phy_id, tmp);
5418 
5419 			if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5420 			    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
5421 				dbs_mode = true;
5422 
5423 			if (ath12k_wmi_sbs_range_present(ab) &&
5424 			    (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5425 			     hw_config_type == WMI_HOST_HW_MODE_SBS ||
5426 			     hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
5427 				sbs_mode = true;
5428 		}
5429 	}
5430 
5431 	info->support_dbs = dbs_mode;
5432 	info->support_sbs = sbs_mode;
5433 
5434 	ath12k_wmi_dump_freq_range(ab);
5435 
5436 	return 0;
5437 }
5438 
5439 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
5440 					 u16 tag, u16 len,
5441 					 const void *ptr, void *data)
5442 {
5443 	const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
5444 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
5445 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
5446 	int ret;
5447 
5448 	switch (tag) {
5449 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
5450 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
5451 						 &parse->arg);
5452 		if (ret) {
5453 			ath12k_warn(ab,
5454 				    "failed to extract wmi service ready ext2 parameters: %d\n",
5455 				    ret);
5456 			return ret;
5457 		}
5458 		break;
5459 
5460 	case WMI_TAG_ARRAY_STRUCT:
5461 		if (!parse->dma_ring_cap_done) {
5462 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
5463 						       &parse->dma_caps_parse);
5464 			if (ret)
5465 				return ret;
5466 
5467 			parse->dma_ring_cap_done = true;
5468 		} else if (!parse->spectral_bin_scaling_done) {
5469 			/* TODO: This is a placeholder, as the WMI tag for
5470 			 * spectral bin scaling comes before
5471 			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT.
5472 			 */
5473 			parse->spectral_bin_scaling_done = true;
5474 		} else if (!parse->mac_phy_caps_ext_done) {
5475 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
5476 						  ath12k_wmi_tlv_mac_phy_caps_ext,
5477 						  parse);
5478 			if (ret) {
5479 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
5480 					    ret);
5481 				return ret;
5482 			}
5483 
5484 			parse->mac_phy_caps_ext_done = true;
5485 		} else if (!parse->hal_reg_caps_ext2_done) {
5486 			parse->hal_reg_caps_ext2_done = true;
5487 		} else if (!parse->scan_radio_caps_ext2_done) {
5488 			parse->scan_radio_caps_ext2_done = true;
5489 		} else if (!parse->twt_caps_done) {
5490 			parse->twt_caps_done = true;
5491 		} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
5492 			parse->htt_msdu_idx_to_qtype_map_done = true;
5493 		} else if (!parse->dbs_or_sbs_cap_ext_done) {
5494 			dbs_or_sbs_caps = ptr;
5495 			ab->wmi_ab.sbs_lower_band_end_freq =
5496 				__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
5497 
5498 			ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
5499 				   ab->wmi_ab.sbs_lower_band_end_freq);
5500 
5501 			ret = ath12k_wmi_update_hw_mode_list(ab);
5502 			if (ret) {
5503 				ath12k_warn(ab, "failed to update hw mode list: %d\n",
5504 					    ret);
5505 				return ret;
5506 			}
5507 
5508 			parse->dbs_or_sbs_cap_ext_done = true;
5509 		}
5510 
5511 		break;
5512 	default:
5513 		break;
5514 	}
5515 
5516 	return 0;
5517 }
5518 
5519 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
5520 					   struct sk_buff *skb)
5521 {
5522 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
5523 	int ret;
5524 
5525 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5526 				  ath12k_wmi_svc_rdy_ext2_parse,
5527 				  &svc_rdy_ext2);
5528 	if (ret) {
5529 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
5530 		goto err;
5531 	}
5532 
5533 	complete(&ab->wmi_ab.service_ready);
5534 
5535 	return 0;
5536 
5537 err:
5538 	ath12k_wmi_free_dbring_caps(ab);
5539 	return ret;
5540 }
5541 
5542 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5543 					   struct wmi_vdev_start_resp_event *vdev_rsp)
5544 {
5545 	const void **tb;
5546 	const struct wmi_vdev_start_resp_event *ev;
5547 	int ret;
5548 
5549 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5550 	if (IS_ERR(tb)) {
5551 		ret = PTR_ERR(tb);
5552 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5553 		return ret;
5554 	}
5555 
5556 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
5557 	if (!ev) {
5558 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
5559 		kfree(tb);
5560 		return -EPROTO;
5561 	}
5562 
5563 	*vdev_rsp = *ev;
5564 
5565 	kfree(tb);
5566 	return 0;
5567 }
5568 
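/* Convert an array of firmware reg rules to host format. Each WMI rule
 * packs multiple fields per 32-bit word (start/end frequency in
 * freq_info, max bandwidth/regulatory power/antenna gain in bw_pwr_info,
 * PSD flag/EIRP in psd_power_info), extracted with le32_get_bits().
 * GFP_ATOMIC is used because this runs from the WMI event path.
 */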
5569 static struct ath12k_reg_rule
5570 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5571 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5572 {
5573 	struct ath12k_reg_rule *reg_rule_ptr;
5574 	u32 count;
5575 
5576 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
5577 			       GFP_ATOMIC);
5578 
5579 	if (!reg_rule_ptr)
5580 		return NULL;
5581 
5582 	for (count = 0; count < num_reg_rules; count++) {
5583 		reg_rule_ptr[count].start_freq =
5584 			le32_get_bits(wmi_reg_rule[count].freq_info,
5585 				      REG_RULE_START_FREQ);
5586 		reg_rule_ptr[count].end_freq =
5587 			le32_get_bits(wmi_reg_rule[count].freq_info,
5588 				      REG_RULE_END_FREQ);
5589 		reg_rule_ptr[count].max_bw =
5590 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5591 				      REG_RULE_MAX_BW);
5592 		reg_rule_ptr[count].reg_power =
5593 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5594 				      REG_RULE_REG_PWR);
5595 		reg_rule_ptr[count].ant_gain =
5596 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5597 				      REG_RULE_ANT_GAIN);
5598 		reg_rule_ptr[count].flags =
5599 			le32_get_bits(wmi_reg_rule[count].flag_info,
5600 				      REG_RULE_FLAGS);
5601 		reg_rule_ptr[count].psd_flag =
5602 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5603 				      REG_RULE_PSD_INFO);
5604 		reg_rule_ptr[count].psd_eirp =
5605 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5606 				      REG_RULE_PSD_EIRP);
5607 	}
5608 
5609 	return reg_rule_ptr;
5610 }
5611 
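/* Count entries in the 5 GHz rule list that actually start in the 6 GHz
 * range; these duplicate the separate 6 GHz rules and are skipped by the
 * caller (see the comment there).
 */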
5612 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5613 					    u32 num_reg_rules)
5614 {
5615 	u8 num_invalid_5ghz_rules = 0;
5616 	u32 count, start_freq;
5617 
5618 	for (count = 0; count < num_reg_rules; count++) {
5619 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5620 
5621 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5622 			num_invalid_5ghz_rules++;
5623 	}
5624 
5625 	return num_invalid_5ghz_rules;
5626 }
5627 
5628 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5629 						   struct sk_buff *skb,
5630 						   struct ath12k_reg_info *reg_info)
5631 {
5632 	const void **tb;
5633 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5634 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5635 	u32 num_2g_reg_rules, num_5g_reg_rules;
5636 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5637 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5638 	u8 num_invalid_5ghz_ext_rules;
5639 	u32 total_reg_rules = 0;
5640 	int ret, i, j;
5641 
5642 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5643 
5644 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5645 	if (IS_ERR(tb)) {
5646 		ret = PTR_ERR(tb);
5647 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5648 		return ret;
5649 	}
5650 
5651 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5652 	if (!ev) {
5653 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5654 		kfree(tb);
5655 		return -EPROTO;
5656 	}
5657 
5658 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5659 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5660 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5661 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5662 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5663 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5664 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5665 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5666 
5667 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5668 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5669 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5670 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5671 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5672 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5673 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5674 	}
5675 
5676 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5677 	total_reg_rules += num_2g_reg_rules;
5678 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5679 	total_reg_rules += num_5g_reg_rules;
5680 
5681 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
5682 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
5683 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
5684 		kfree(tb);
5685 		return -EINVAL;
5686 	}
5687 
5688 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5689 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
5690 
5691 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
5692 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
5693 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
5694 			kfree(tb);
5695 			return -EINVAL;
5696 		}
5697 
5698 		total_reg_rules += num_6g_reg_rules_ap[i];
5699 	}
5700 
5701 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5702 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5703 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5704 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5705 
5706 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5707 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5708 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5709 
5710 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5711 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5712 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5713 
5714 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
5715 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
5716 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
5717 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client (type: %d)\n",
5718 				    i);
5719 			kfree(tb);
5720 			return -EINVAL;
5721 		}
5722 	}
5723 
5724 	if (!total_reg_rules) {
5725 		ath12k_warn(ab, "No reg rules available\n");
5726 		kfree(tb);
5727 		return -EINVAL;
5728 	}
5729 
5730 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5731 
5732 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5733 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5734 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5735 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5736 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5737 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5738 
5739 	switch (le32_to_cpu(ev->status_code)) {
5740 	case WMI_REG_SET_CC_STATUS_PASS:
5741 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5742 		break;
5743 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5744 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5745 		break;
5746 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5747 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5748 		break;
5749 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5750 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5751 		break;
5752 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5753 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5754 		break;
5755 	case WMI_REG_SET_CC_STATUS_FAIL:
5756 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5757 		break;
5758 	}
5759 
5760 	reg_info->is_ext_reg_event = true;
5761 
5762 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5763 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5764 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5765 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5766 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5767 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5768 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5769 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5770 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5771 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5772 
5773 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5774 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5775 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5776 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5777 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5778 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5779 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5780 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5781 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5782 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5783 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5784 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5785 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5786 	}
5787 
5788 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5789 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5790 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5791 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5792 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5793 		   reg_info->phybitmap);
5794 
5795 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5796 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5797 		   num_2g_reg_rules, num_5g_reg_rules);
5798 
5799 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5800 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5801 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5802 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5803 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5804 
5805 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5806 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5807 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5808 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5809 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5810 
5811 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5812 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5813 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5814 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5815 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5816 
5817 	ext_wmi_reg_rule =
5818 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5819 			+ sizeof(*ev)
5820 			+ sizeof(struct wmi_tlv));
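	/* The rule arrays follow the fixed event struct and one TLV header
	 * as a flat sequence: 2 GHz rules, 5 GHz rules (including any
	 * invalid 6 GHz entries), 6 GHz AP rules per power type, then 6 GHz
	 * client rules per AP power type and client type. ext_wmi_reg_rule
	 * is advanced past each chunk below.
	 */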
5821 
5822 	if (num_2g_reg_rules) {
5823 		reg_info->reg_rules_2g_ptr =
5824 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5825 						      ext_wmi_reg_rule);
5826 
5827 		if (!reg_info->reg_rules_2g_ptr) {
5828 			kfree(tb);
5829 			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
5830 			return -ENOMEM;
5831 		}
5832 	}
5833 
5834 	ext_wmi_reg_rule += num_2g_reg_rules;
5835 
5836 	/* Firmware might include 6 GHz reg rules in the 5 GHz rule list
5837 	 * for a few countries along with the separate 6 GHz rules.
5838 	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
5839 	 * lists causes the intersect check to be true, and the same rules
5840 	 * will be shown multiple times in the iw command output.
5841 	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
5842 	 */
5843 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5844 								       num_5g_reg_rules);
5845 
5846 	if (num_invalid_5ghz_ext_rules) {
5847 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5848 			   "CC: %s, %d 5 GHz reg rules from fw, %d invalid 5 GHz rules",
5849 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5850 			   num_invalid_5ghz_ext_rules);
5851 
5852 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
5853 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5854 	}
5855 
5856 	if (num_5g_reg_rules) {
5857 		reg_info->reg_rules_5g_ptr =
5858 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5859 						      ext_wmi_reg_rule);
5860 
5861 		if (!reg_info->reg_rules_5g_ptr) {
5862 			kfree(tb);
5863 			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
5864 			return -ENOMEM;
5865 		}
5866 	}
5867 
5868 	/* We have adjusted the number of 5 GHz reg rules above, but the rule
5869 	 * pointer must still be advanced past those invalid rules in ext_wmi_reg_rule.
5870 	 *
5871 	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
5872 	 */
5873 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5874 
5875 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5876 		reg_info->reg_rules_6g_ap_ptr[i] =
5877 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5878 						      ext_wmi_reg_rule);
5879 
5880 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5881 			kfree(tb);
5882 			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
5883 			return -ENOMEM;
5884 		}
5885 
5886 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5887 	}
5888 
5889 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5890 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5891 			reg_info->reg_rules_6g_client_ptr[j][i] =
5892 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5893 							      ext_wmi_reg_rule);
5894 
5895 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5896 				kfree(tb);
5897 				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
5898 				return -ENOMEM;
5899 			}
5900 
5901 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5902 		}
5903 	}
5904 
5905 	reg_info->client_type = le32_to_cpu(ev->client_type);
5906 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5907 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5908 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5909 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5910 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5911 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5912 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5913 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5914 
5915 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5916 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5917 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5918 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5919 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5920 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5921 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5922 	}
5923 
5924 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5925 
5926 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5927 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5928 
5929 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5930 
5931 	kfree(tb);
5932 	return 0;
5933 }
5934 
5935 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5936 					struct wmi_peer_delete_resp_event *peer_del_resp)
5937 {
5938 	const void **tb;
5939 	const struct wmi_peer_delete_resp_event *ev;
5940 	int ret;
5941 
5942 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5943 	if (IS_ERR(tb)) {
5944 		ret = PTR_ERR(tb);
5945 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5946 		return ret;
5947 	}
5948 
5949 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5950 	if (!ev) {
5951 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5952 		kfree(tb);
5953 		return -EPROTO;
5954 	}
5955 
5956 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5957 
5958 	peer_del_resp->vdev_id = ev->vdev_id;
5959 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5960 			ev->peer_macaddr.addr);
5961 
5962 	kfree(tb);
5963 	return 0;
5964 }
5965 
5966 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5967 					struct sk_buff *skb,
5968 					u32 *vdev_id)
5969 {
5970 	const void **tb;
5971 	const struct wmi_vdev_delete_resp_event *ev;
5972 	int ret;
5973 
5974 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5975 	if (IS_ERR(tb)) {
5976 		ret = PTR_ERR(tb);
5977 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5978 		return ret;
5979 	}
5980 
5981 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5982 	if (!ev) {
5983 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5984 		kfree(tb);
5985 		return -EPROTO;
5986 	}
5987 
5988 	*vdev_id = le32_to_cpu(ev->vdev_id);
5989 
5990 	kfree(tb);
5991 	return 0;
5992 }
5993 
5994 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5995 					struct sk_buff *skb,
5996 					u32 *vdev_id, u32 *tx_status)
5997 {
5998 	const void **tb;
5999 	const struct wmi_bcn_tx_status_event *ev;
6000 	int ret;
6001 
6002 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6003 	if (IS_ERR(tb)) {
6004 		ret = PTR_ERR(tb);
6005 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6006 		return ret;
6007 	}
6008 
6009 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
6010 	if (!ev) {
6011 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
6012 		kfree(tb);
6013 		return -EPROTO;
6014 	}
6015 
6016 	*vdev_id = le32_to_cpu(ev->vdev_id);
6017 	*tx_status = le32_to_cpu(ev->tx_status);
6018 
6019 	kfree(tb);
6020 	return 0;
6021 }
6022 
6023 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
6024 					      u32 *vdev_id)
6025 {
6026 	const void **tb;
6027 	const struct wmi_vdev_stopped_event *ev;
6028 	int ret;
6029 
6030 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6031 	if (IS_ERR(tb)) {
6032 		ret = PTR_ERR(tb);
6033 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6034 		return ret;
6035 	}
6036 
6037 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
6038 	if (!ev) {
6039 		ath12k_warn(ab, "failed to fetch vdev stop ev");
6040 		kfree(tb);
6041 		return -EPROTO;
6042 	}
6043 
6044 	*vdev_id = le32_to_cpu(ev->vdev_id);
6045 
6046 	kfree(tb);
6047 	return 0;
6048 }
6049 
6050 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
6051 					u16 tag, u16 len,
6052 					const void *ptr, void *data)
6053 {
6054 	struct wmi_tlv_mgmt_rx_parse *parse = data;
6055 
6056 	switch (tag) {
6057 	case WMI_TAG_MGMT_RX_HDR:
6058 		parse->fixed = ptr;
6059 		break;
6060 	case WMI_TAG_ARRAY_BYTE:
6061 		if (!parse->frame_buf_done) {
6062 			parse->frame_buf = ptr;
6063 			parse->frame_buf_done = true;
6064 		}
6065 		break;
6066 	}
6067 	return 0;
6068 }
6069 
6070 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
6071 					  struct sk_buff *skb,
6072 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
6073 {
6074 	struct wmi_tlv_mgmt_rx_parse parse = { };
6075 	const struct ath12k_wmi_mgmt_rx_params *ev;
6076 	const u8 *frame;
6077 	int i, ret;
6078 
6079 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6080 				  ath12k_wmi_tlv_mgmt_rx_parse,
6081 				  &parse);
6082 	if (ret) {
6083 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
6084 		return ret;
6085 	}
6086 
6087 	ev = parse.fixed;
6088 	frame = parse.frame_buf;
6089 
6090 	if (!ev || !frame) {
6091 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
6092 		return -EPROTO;
6093 	}
6094 
6095 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
6096 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
6097 	hdr->channel = le32_to_cpu(ev->channel);
6098 	hdr->snr = le32_to_cpu(ev->snr);
6099 	hdr->rate = le32_to_cpu(ev->rate);
6100 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
6101 	hdr->buf_len = le32_to_cpu(ev->buf_len);
6102 	hdr->status = le32_to_cpu(ev->status);
6103 	hdr->flags = le32_to_cpu(ev->flags);
6104 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
6105 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
6106 
6107 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
6108 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
6109 
6110 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
6111 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
6112 		return -EPROTO;
6113 	}
6114 
6115 	/* shift the sk_buff to point to `frame` */
6116 	skb_trim(skb, 0);
6117 	skb_put(skb, frame - skb->data);
6118 	skb_pull(skb, frame - skb->data);
6119 	skb_put(skb, hdr->buf_len);
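	/* Net effect of the trim/put/pull sequence: skb->data now points at
	 * the management frame and skb->len equals hdr->buf_len, i.e. the
	 * skb covers exactly the received frame.
	 */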
6120 
6121 	return 0;
6122 }
6123 
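/* Complete a management frame TX: look up the skb by descriptor id under
 * the IDR lock, unmap its DMA buffer, translate the WMI status into
 * mac80211 flags (TX_STAT_ACK, or TX_STAT_NOACK_TRANSMITTED for no-ack
 * frames) and report the status. The last completion wakes
 * txmgmt_empty_waitq so that waiters can proceed.
 */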
6124 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
6125 				    u32 status)
6126 {
6127 	struct sk_buff *msdu;
6128 	struct ieee80211_tx_info *info;
6129 	struct ath12k_skb_cb *skb_cb;
6130 	int num_mgmt;
6131 
6132 	spin_lock_bh(&ar->txmgmt_idr_lock);
6133 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
6134 
6135 	if (!msdu) {
6136 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
6137 			    desc_id);
6138 		spin_unlock_bh(&ar->txmgmt_idr_lock);
6139 		return -ENOENT;
6140 	}
6141 
6142 	idr_remove(&ar->txmgmt_idr, desc_id);
6143 	spin_unlock_bh(&ar->txmgmt_idr_lock);
6144 
6145 	skb_cb = ATH12K_SKB_CB(msdu);
6146 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
6147 
6148 	info = IEEE80211_SKB_CB(msdu);
6149 	memset(&info->status, 0, sizeof(info->status));
6150 
6151 	/* skip tx rate update from ieee80211_status */
6152 	info->status.rates[0].idx = -1;
6153 
6154 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
6155 		info->flags |= IEEE80211_TX_STAT_ACK;
6156 
6157 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
6158 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
6159 
6160 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
6161 
6162 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
6163 
6164 	/* WARN if we receive this event without any pending mgmt tx */
6165 	if (num_mgmt < 0)
6166 		WARN_ON_ONCE(1);
6167 
6168 	if (!num_mgmt)
6169 		wake_up(&ar->txmgmt_empty_waitq);
6170 
6171 	return 0;
6172 }
6173 
6174 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
6175 					       struct sk_buff *skb,
6176 					       struct wmi_mgmt_tx_compl_event *param)
6177 {
6178 	const void **tb;
6179 	const struct wmi_mgmt_tx_compl_event *ev;
6180 	int ret;
6181 
6182 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6183 	if (IS_ERR(tb)) {
6184 		ret = PTR_ERR(tb);
6185 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6186 		return ret;
6187 	}
6188 
6189 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
6190 	if (!ev) {
6191 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
6192 		kfree(tb);
6193 		return -EPROTO;
6194 	}
6195 
6196 	param->pdev_id = ev->pdev_id;
6197 	param->desc_id = ev->desc_id;
6198 	param->status = ev->status;
6199 
6200 	kfree(tb);
6201 	return 0;
6202 }
6203 
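/* Scan state machine helpers: each handler below validates the event
 * against ar->scan.state (IDLE -> STARTING -> RUNNING -> ABORTING) under
 * ar->data_lock and warns about events arriving in an unexpected state.
 */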
6204 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
6205 {
6206 	lockdep_assert_held(&ar->data_lock);
6207 
6208 	switch (ar->scan.state) {
6209 	case ATH12K_SCAN_IDLE:
6210 	case ATH12K_SCAN_RUNNING:
6211 	case ATH12K_SCAN_ABORTING:
6212 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
6213 			    ath12k_scan_state_str(ar->scan.state),
6214 			    ar->scan.state);
6215 		break;
6216 	case ATH12K_SCAN_STARTING:
6217 		ar->scan.state = ATH12K_SCAN_RUNNING;
6218 
6219 		if (ar->scan.is_roc)
6220 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
6221 
6222 		complete(&ar->scan.started);
6223 		break;
6224 	}
6225 }
6226 
6227 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
6228 {
6229 	lockdep_assert_held(&ar->data_lock);
6230 
6231 	switch (ar->scan.state) {
6232 	case ATH12K_SCAN_IDLE:
6233 	case ATH12K_SCAN_RUNNING:
6234 	case ATH12K_SCAN_ABORTING:
6235 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
6236 			    ath12k_scan_state_str(ar->scan.state),
6237 			    ar->scan.state);
6238 		break;
6239 	case ATH12K_SCAN_STARTING:
6240 		complete(&ar->scan.started);
6241 		__ath12k_mac_scan_finish(ar);
6242 		break;
6243 	}
6244 }
6245 
6246 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
6247 {
6248 	lockdep_assert_held(&ar->data_lock);
6249 
6250 	switch (ar->scan.state) {
6251 	case ATH12K_SCAN_IDLE:
6252 	case ATH12K_SCAN_STARTING:
6253 		/* One suspected reason scan can be completed while starting is
6254 		 * if firmware fails to deliver all scan events to the host,
6255 		 * e.g. when transport pipe is full. This has been observed
6256 		 * with spectral scan phyerr events starving wmi transport
6257 		 * pipe. In such a case the "scan completed" event should be (and
6258 		 * is) ignored by the host, as it may just be the firmware's scan
6259 		 * state machine recovering.
6260 		 */
6261 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
6262 			    ath12k_scan_state_str(ar->scan.state),
6263 			    ar->scan.state);
6264 		break;
6265 	case ATH12K_SCAN_RUNNING:
6266 	case ATH12K_SCAN_ABORTING:
6267 		__ath12k_mac_scan_finish(ar);
6268 		break;
6269 	}
6270 }
6271 
6272 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
6273 {
6274 	lockdep_assert_held(&ar->data_lock);
6275 
6276 	switch (ar->scan.state) {
6277 	case ATH12K_SCAN_IDLE:
6278 	case ATH12K_SCAN_STARTING:
6279 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
6280 			    ath12k_scan_state_str(ar->scan.state),
6281 			    ar->scan.state);
6282 		break;
6283 	case ATH12K_SCAN_RUNNING:
6284 	case ATH12K_SCAN_ABORTING:
6285 		ar->scan_channel = NULL;
6286 		break;
6287 	}
6288 }
6289 
6290 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
6291 {
6292 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6293 
6294 	lockdep_assert_held(&ar->data_lock);
6295 
6296 	switch (ar->scan.state) {
6297 	case ATH12K_SCAN_IDLE:
6298 	case ATH12K_SCAN_STARTING:
6299 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
6300 			    ath12k_scan_state_str(ar->scan.state),
6301 			    ar->scan.state);
6302 		break;
6303 	case ATH12K_SCAN_RUNNING:
6304 	case ATH12K_SCAN_ABORTING:
6305 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
6306 
6307 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
6308 			complete(&ar->scan.on_channel);
6309 
6310 		break;
6311 	}
6312 }
6313 
6314 static const char *
6315 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
6316 			       enum wmi_scan_completion_reason reason)
6317 {
6318 	switch (type) {
6319 	case WMI_SCAN_EVENT_STARTED:
6320 		return "started";
6321 	case WMI_SCAN_EVENT_COMPLETED:
6322 		switch (reason) {
6323 		case WMI_SCAN_REASON_COMPLETED:
6324 			return "completed";
6325 		case WMI_SCAN_REASON_CANCELLED:
6326 			return "completed [cancelled]";
6327 		case WMI_SCAN_REASON_PREEMPTED:
6328 			return "completed [preempted]";
6329 		case WMI_SCAN_REASON_TIMEDOUT:
6330 			return "completed [timedout]";
6331 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
6332 			return "completed [internal err]";
6333 		case WMI_SCAN_REASON_MAX:
6334 			break;
6335 		}
6336 		return "completed [unknown]";
6337 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6338 		return "bss channel";
6339 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6340 		return "foreign channel";
6341 	case WMI_SCAN_EVENT_DEQUEUED:
6342 		return "dequeued";
6343 	case WMI_SCAN_EVENT_PREEMPTED:
6344 		return "preempted";
6345 	case WMI_SCAN_EVENT_START_FAILED:
6346 		return "start failed";
6347 	case WMI_SCAN_EVENT_RESTARTED:
6348 		return "restarted";
6349 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6350 		return "foreign channel exit";
6351 	default:
6352 		return "unknown";
6353 	}
6354 }
6355 
6356 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
6357 			       struct wmi_scan_event *scan_evt_param)
6358 {
6359 	const void **tb;
6360 	const struct wmi_scan_event *ev;
6361 	int ret;
6362 
6363 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6364 	if (IS_ERR(tb)) {
6365 		ret = PTR_ERR(tb);
6366 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6367 		return ret;
6368 	}
6369 
6370 	ev = tb[WMI_TAG_SCAN_EVENT];
6371 	if (!ev) {
6372 		ath12k_warn(ab, "failed to fetch scan ev");
6373 		kfree(tb);
6374 		return -EPROTO;
6375 	}
6376 
6377 	scan_evt_param->event_type = ev->event_type;
6378 	scan_evt_param->reason = ev->reason;
6379 	scan_evt_param->channel_freq = ev->channel_freq;
6380 	scan_evt_param->scan_req_id = ev->scan_req_id;
6381 	scan_evt_param->scan_id = ev->scan_id;
6382 	scan_evt_param->vdev_id = ev->vdev_id;
6383 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
6384 
6385 	kfree(tb);
6386 	return 0;
6387 }
6388 
6389 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
6390 					   struct wmi_peer_sta_kickout_arg *arg)
6391 {
6392 	const void **tb;
6393 	const struct wmi_peer_sta_kickout_event *ev;
6394 	int ret;
6395 
6396 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6397 	if (IS_ERR(tb)) {
6398 		ret = PTR_ERR(tb);
6399 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6400 		return ret;
6401 	}
6402 
6403 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
6404 	if (!ev) {
6405 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
6406 		kfree(tb);
6407 		return -EPROTO;
6408 	}
6409 
6410 	arg->mac_addr = ev->peer_macaddr.addr;
6411 
6412 	kfree(tb);
6413 	return 0;
6414 }
6415 
6416 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
6417 			       struct wmi_roam_event *roam_ev)
6418 {
6419 	const void **tb;
6420 	const struct wmi_roam_event *ev;
6421 	int ret;
6422 
6423 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6424 	if (IS_ERR(tb)) {
6425 		ret = PTR_ERR(tb);
6426 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6427 		return ret;
6428 	}
6429 
6430 	ev = tb[WMI_TAG_ROAM_EVENT];
6431 	if (!ev) {
6432 		ath12k_warn(ab, "failed to fetch roam ev");
6433 		kfree(tb);
6434 		return -EPROTO;
6435 	}
6436 
6437 	roam_ev->vdev_id = ev->vdev_id;
6438 	roam_ev->reason = ev->reason;
6439 	roam_ev->rssi = ev->rssi;
6440 
6441 	kfree(tb);
6442 	return 0;
6443 }
6444 
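/* Map a channel center frequency to its flat index across all enabled
 * bands. Channels outside ar->freq_range are still counted but never
 * matched, and if the frequency is not found the running index (the
 * total channel count) is returned rather than an error.
 */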
6445 static int freq_to_idx(struct ath12k *ar, int freq)
6446 {
6447 	struct ieee80211_supported_band *sband;
6448 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6449 	int band, ch, idx = 0;
6450 
6451 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6452 		if (!ar->mac.sbands[band].channels)
6453 			continue;
6454 
6455 		sband = hw->wiphy->bands[band];
6456 		if (!sband)
6457 			continue;
6458 
6459 		for (ch = 0; ch < sband->n_channels; ch++, idx++) {
6460 			if (sband->channels[ch].center_freq <
6461 			    KHZ_TO_MHZ(ar->freq_range.start_freq) ||
6462 			    sband->channels[ch].center_freq >
6463 			    KHZ_TO_MHZ(ar->freq_range.end_freq))
6464 				continue;
6465 
6466 			if (sband->channels[ch].center_freq == freq)
6467 				goto exit;
6468 		}
6469 	}
6470 
6471 exit:
6472 	return idx;
6473 }
6474 
6475 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6476 				    struct wmi_chan_info_event *ch_info_ev)
6477 {
6478 	const void **tb;
6479 	const struct wmi_chan_info_event *ev;
6480 	int ret;
6481 
6482 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6483 	if (IS_ERR(tb)) {
6484 		ret = PTR_ERR(tb);
6485 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6486 		return ret;
6487 	}
6488 
6489 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
6490 	if (!ev) {
6491 		ath12k_warn(ab, "failed to fetch chan info ev");
6492 		kfree(tb);
6493 		return -EPROTO;
6494 	}
6495 
6496 	ch_info_ev->err_code = ev->err_code;
6497 	ch_info_ev->freq = ev->freq;
6498 	ch_info_ev->cmd_flags = ev->cmd_flags;
6499 	ch_info_ev->noise_floor = ev->noise_floor;
6500 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
6501 	ch_info_ev->cycle_count = ev->cycle_count;
6502 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
6503 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
6504 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
6505 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
6506 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
6507 	ch_info_ev->vdev_id = ev->vdev_id;
6508 
6509 	kfree(tb);
6510 	return 0;
6511 }
6512 
6513 static int
6514 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6515 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
6516 {
6517 	const void **tb;
6518 	const struct wmi_pdev_bss_chan_info_event *ev;
6519 	int ret;
6520 
6521 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6522 	if (IS_ERR(tb)) {
6523 		ret = PTR_ERR(tb);
6524 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6525 		return ret;
6526 	}
6527 
6528 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
6529 	if (!ev) {
6530 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
6531 		kfree(tb);
6532 		return -EPROTO;
6533 	}
6534 
6535 	bss_ch_info_ev->pdev_id = ev->pdev_id;
6536 	bss_ch_info_ev->freq = ev->freq;
6537 	bss_ch_info_ev->noise_floor = ev->noise_floor;
6538 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
6539 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
6540 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
6541 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
6542 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
6543 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
6544 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
6545 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
6546 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
6547 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
6548 
6549 	kfree(tb);
6550 	return 0;
6551 }
6552 
6553 static int
6554 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
6555 				      struct wmi_vdev_install_key_complete_arg *arg)
6556 {
6557 	const void **tb;
6558 	const struct wmi_vdev_install_key_compl_event *ev;
6559 	int ret;
6560 
6561 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6562 	if (IS_ERR(tb)) {
6563 		ret = PTR_ERR(tb);
6564 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6565 		return ret;
6566 	}
6567 
6568 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
6569 	if (!ev) {
6570 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
6571 		kfree(tb);
6572 		return -EPROTO;
6573 	}
6574 
6575 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6576 	arg->macaddr = ev->peer_macaddr.addr;
6577 	arg->key_idx = le32_to_cpu(ev->key_idx);
6578 	arg->key_flags = le32_to_cpu(ev->key_flags);
6579 	arg->status = le32_to_cpu(ev->status);
6580 
6581 	kfree(tb);
6582 	return 0;
6583 }
6584 
6585 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6586 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6587 {
6588 	const void **tb;
6589 	const struct wmi_peer_assoc_conf_event *ev;
6590 	int ret;
6591 
6592 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6593 	if (IS_ERR(tb)) {
6594 		ret = PTR_ERR(tb);
6595 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6596 		return ret;
6597 	}
6598 
6599 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6600 	if (!ev) {
6601 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6602 		kfree(tb);
6603 		return -EPROTO;
6604 	}
6605 
6606 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6607 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6608 
6609 	kfree(tb);
6610 	return 0;
6611 }
6612 
6613 static int
6614 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6615 			 const struct wmi_pdev_temperature_event *ev)
6616 {
6617 	const void **tb;
6618 	int ret;
6619 
6620 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6621 	if (IS_ERR(tb)) {
6622 		ret = PTR_ERR(tb);
6623 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6624 		return ret;
6625 	}
6626 
6627 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6628 	if (!ev) {
6629 		ath12k_warn(ab, "failed to fetch pdev temp ev");
6630 		kfree(tb);
6631 		return -EPROTO;
6632 	}
6633 
6634 	kfree(tb);
6635 	return 0;
6636 }
6637 
6638 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6639 {
6640 	/* try to send pending beacons first. they take priority */
6641 	wake_up(&ab->wmi_ab.tx_credits_wq);
6642 }
6643 
6644 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6645 {
6646 	const struct wmi_11d_new_cc_event *ev;
6647 	struct ath12k *ar;
6648 	struct ath12k_pdev *pdev;
6649 	const void **tb;
6650 	int ret, i;
6651 
6652 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6653 	if (IS_ERR(tb)) {
6654 		ret = PTR_ERR(tb);
6655 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6656 		return ret;
6657 	}
6658 
6659 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6660 	if (!ev) {
6661 		kfree(tb);
6662 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6663 		return -EPROTO;
6664 	}
6665 
6666 	spin_lock_bh(&ab->base_lock);
6667 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6668 	spin_unlock_bh(&ab->base_lock);
6669 
6670 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6671 		   ab->new_alpha2[0],
6672 		   ab->new_alpha2[1]);
6673 
6674 	kfree(tb);
6675 
6676 	for (i = 0; i < ab->num_radios; i++) {
6677 		pdev = &ab->pdevs[i];
6678 		ar = pdev->ar;
6679 		ar->state_11d = ATH12K_11D_IDLE;
6680 		ar->ah->regd_updated = false;
6681 		complete(&ar->completed_11d_scan);
6682 	}
6683 
6684 	queue_work(ab->workqueue, &ab->update_11d_work);
6685 
6686 	return 0;
6687 }
6688 
6689 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6690 				       struct sk_buff *skb)
6691 {
6692 	dev_kfree_skb(skb);
6693 }
6694 
6695 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6696 {
6697 	struct ath12k_reg_info *reg_info;
6698 	struct ath12k *ar = NULL;
6699 	u8 pdev_idx = 255;
6700 	int ret;
6701 
6702 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6703 	if (!reg_info) {
6704 		ret = -ENOMEM;
6705 		goto fallback;
6706 	}
6707 
6708 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6709 	if (ret) {
6710 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6711 		goto mem_free;
6712 	}
6713 
6714 	ret = ath12k_reg_validate_reg_info(ab, reg_info);
6715 	if (ret == ATH12K_REG_STATUS_FALLBACK) {
6716 		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
6717 		/* firmware has successfully switched to the new regd but the host
6718 		 * cannot continue, so free reg_info and fall back to the old regd
6719 		 */
6720 		goto mem_free;
6721 	} else if (ret == ATH12K_REG_STATUS_DROP) {
6722 		/* reg info is valid but we will neither store it nor
6723 		 * create a new regd for it
6724 		 */
6725 		ret = ATH12K_REG_STATUS_VALID;
6726 		goto mem_free;
6727 	}
6728 
6729 	/* free the old reg_info if it exists */
6730 	pdev_idx = reg_info->phy_id;
6731 	if (ab->reg_info[pdev_idx]) {
6732 		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
6733 		kfree(ab->reg_info[pdev_idx]);
6734 	}
6735 	/* reg_info is valid; store it for later use even if the
6736 	 * regd build below fails
6737 	 */
6738 	ab->reg_info[pdev_idx] = reg_info;
6739 
6740 	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
6741 					  IEEE80211_REG_UNSET_AP);
6742 	if (ret) {
6743 		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
6744 		goto fallback;
6745 	}
6746 
6747 	goto out;
6748 
6749 mem_free:
6750 	ath12k_reg_reset_reg_info(reg_info);
6751 	kfree(reg_info);
6752 
6753 	if (ret == ATH12K_REG_STATUS_VALID)
6754 		goto out;
6755 
6756 fallback:
6757 	/* Fall back to the older regd (by sending the previous country
6758 	 * setting again) if the fw succeeded but we failed to process the
6759 	 * event here. The regdomain should be uniform across driver and fw.
6760 	 * Since the fw processed the command and returned success, we expect
6761 	 * this function to succeed as well. If it doesn't, CTRY needs to be
6762 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6763 	 */
6764 	/* TODO: This is rare, but still should also be handled */
6765 	WARN_ON(1);
6766 
6767 out:
6768 	/* In some error cases, even a valid pdev_idx might not be available */
6769 	if (pdev_idx != 255)
6770 		ar = ab->pdevs[pdev_idx].ar;
6771 
6772 	/* During the boot-time update, 'ar' might not be allocated,
6773 	 * so the completion cannot be marked at that point.
6774 	 * This boot-time update is handled in ath12k_mac_hw_register()
6775 	 * before registering the hardware.
6776 	 */
6777 	if (ar)
6778 		complete_all(&ar->regd_update_completed);
6779 
6780 	return ret;
6781 }
6782 
6783 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6784 				const void *ptr, void *data)
6785 {
6786 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6787 	struct wmi_ready_event fixed_param;
6788 	struct ath12k_wmi_mac_addr_params *addr_list;
6789 	struct ath12k_pdev *pdev;
6790 	u32 num_mac_addr;
6791 	int i;
6792 
6793 	switch (tag) {
6794 	case WMI_TAG_READY_EVENT:
6795 		memset(&fixed_param, 0, sizeof(fixed_param));
6796 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6797 		       min_t(u16, sizeof(fixed_param), len));
6798 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6799 		rdy_parse->num_extra_mac_addr =
6800 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6801 
6802 		ether_addr_copy(ab->mac_addr,
6803 				fixed_param.ready_event_min.mac_addr.addr);
6804 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6805 		ab->wmi_ready = true;
6806 		break;
6807 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6808 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6809 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6810 
6811 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6812 			break;
6813 
6814 		for (i = 0; i < ab->num_radios; i++) {
6815 			pdev = &ab->pdevs[i];
6816 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6817 		}
6818 		ab->pdevs_macaddr_valid = true;
6819 		break;
6820 	default:
6821 		break;
6822 	}
6823 
6824 	return 0;
6825 }
6826 
6827 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6828 {
6829 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6830 	int ret;
6831 
6832 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6833 				  ath12k_wmi_rdy_parse, &rdy_parse);
6834 	if (ret) {
6835 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6836 		return ret;
6837 	}
6838 
6839 	complete(&ab->wmi_ab.unified_ready);
6840 	return 0;
6841 }
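
/*
 * Editor's note: WMI events in this file are parsed in one of two styles.
 * ath12k_wmi_tlv_parse_alloc() builds a tag-indexed pointer table that is
 * looked up and freed, while ath12k_wmi_tlv_iter() (used above) streams
 * every TLV through a callback. A skeleton of such a callback, matching
 * the signature of ath12k_wmi_rdy_parse() above, is sketched below; the
 * per-tag actions are placeholders.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_tlv_parse(struct ath12k_base *ab, u16 tag, u16 len,
			     const void *ptr, void *data)
{
	/* invoked once per TLV with its tag, length and payload pointer */
	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/* copy/validate the fixed part of the event */
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		/* walk the trailing array of fixed-size entries */
		break;
	default:
		break;
	}

	return 0;
}
#endif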
6842 
6843 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6844 {
6845 	struct wmi_peer_delete_resp_event peer_del_resp;
6846 	struct ath12k *ar;
6847 
6848 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6849 		ath12k_warn(ab, "failed to extract peer delete resp");
6850 		return;
6851 	}
6852 
6853 	rcu_read_lock();
6854 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6855 	if (!ar) {
6856 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6857 			    peer_del_resp.vdev_id);
6858 		rcu_read_unlock();
6859 		return;
6860 	}
6861 
6862 	complete(&ar->peer_delete_done);
6863 	rcu_read_unlock();
6864 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6865 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6866 }
6867 
6868 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6869 					  struct sk_buff *skb)
6870 {
6871 	struct ath12k *ar;
6872 	u32 vdev_id = 0;
6873 
6874 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6875 		ath12k_warn(ab, "failed to extract vdev delete resp");
6876 		return;
6877 	}
6878 
6879 	rcu_read_lock();
6880 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6881 	if (!ar) {
6882 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6883 			    vdev_id);
6884 		rcu_read_unlock();
6885 		return;
6886 	}
6887 
6888 	complete(&ar->vdev_delete_done);
6889 
6890 	rcu_read_unlock();
6891 
6892 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6893 		   vdev_id);
6894 }
6895 
6896 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6897 {
6898 	switch (vdev_resp_status) {
6899 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6900 		return "invalid vdev id";
6901 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6902 		return "not supported";
6903 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6904 		return "dfs violation";
6905 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6906 		return "invalid regdomain";
6907 	default:
6908 		return "unknown";
6909 	}
6910 }
6911 
6912 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6913 {
6914 	struct wmi_vdev_start_resp_event vdev_start_resp;
6915 	struct ath12k *ar;
6916 	u32 status;
6917 
6918 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6919 		ath12k_warn(ab, "failed to extract vdev start resp");
6920 		return;
6921 	}
6922 
6923 	rcu_read_lock();
6924 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6925 	if (!ar) {
6926 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6927 			    vdev_start_resp.vdev_id);
6928 		rcu_read_unlock();
6929 		return;
6930 	}
6931 
6932 	ar->last_wmi_vdev_start_status = 0;
6933 
6934 	status = le32_to_cpu(vdev_start_resp.status);
6935 	if (WARN_ON_ONCE(status)) {
6936 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6937 			    status, ath12k_wmi_vdev_resp_print(status));
6938 		ar->last_wmi_vdev_start_status = status;
6939 	}
6940 
6941 	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
6942 
6943 	complete(&ar->vdev_setup_done);
6944 
6945 	rcu_read_unlock();
6946 
6947 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6948 		   vdev_start_resp.vdev_id);
6949 }
6950 
6951 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6952 {
6953 	u32 vdev_id, tx_status;
6954 
6955 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6956 		ath12k_warn(ab, "failed to extract bcn tx status");
6957 		return;
6958 	}
6959 }
6960 
6961 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6962 {
6963 	struct ath12k *ar;
6964 	u32 vdev_id = 0;
6965 
6966 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6967 		ath12k_warn(ab, "failed to extract vdev stopped event");
6968 		return;
6969 	}
6970 
6971 	rcu_read_lock();
6972 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6973 	if (!ar) {
6974 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6975 			    vdev_id);
6976 		rcu_read_unlock();
6977 		return;
6978 	}
6979 
6980 	complete(&ar->vdev_setup_done);
6981 
6982 	rcu_read_unlock();
6983 
6984 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6985 }
6986 
6987 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6988 {
6989 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6990 	struct ath12k *ar;
6991 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6992 	struct ieee80211_hdr *hdr;
6993 	u16 fc;
6994 	struct ieee80211_supported_band *sband;
6995 	s32 noise_floor;
6996 
6997 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6998 		ath12k_warn(ab, "failed to extract mgmt rx event");
6999 		dev_kfree_skb(skb);
7000 		return;
7001 	}
7002 
7003 	memset(status, 0, sizeof(*status));
7004 
7005 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
7006 		   rx_ev.status);
7007 
7008 	rcu_read_lock();
7009 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
7010 
7011 	if (!ar) {
7012 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
7013 			    rx_ev.pdev_id);
7014 		dev_kfree_skb(skb);
7015 		goto exit;
7016 	}
7017 
7018 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
7019 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
7020 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
7021 			     WMI_RX_STATUS_ERR_CRC))) {
7022 		dev_kfree_skb(skb);
7023 		goto exit;
7024 	}
7025 
7026 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
7027 		status->flag |= RX_FLAG_MMIC_ERROR;
7028 
7029 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
7030 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
7031 		status->band = NL80211_BAND_6GHZ;
7032 		status->freq = rx_ev.chan_freq;
7033 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
7034 		status->band = NL80211_BAND_2GHZ;
7035 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
7036 		status->band = NL80211_BAND_5GHZ;
7037 	} else {
7038 		/* Shouldn't happen unless list of advertised channels to
7039 		 * mac80211 has been changed.
7040 		 */
7041 		WARN_ON_ONCE(1);
7042 		dev_kfree_skb(skb);
7043 		goto exit;
7044 	}
7045 
7046 	if (rx_ev.phy_mode == MODE_11B &&
7047 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
7048 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7049 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
7050 
7051 	sband = &ar->mac.sbands[status->band];
7052 
7053 	if (status->band != NL80211_BAND_6GHZ)
7054 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
7055 							      status->band);
7056 
7057 	spin_lock_bh(&ar->data_lock);
7058 	noise_floor = ath12k_pdev_get_noise_floor(ar);
7059 	spin_unlock_bh(&ar->data_lock);
7060 
7061 	status->signal = rx_ev.snr + noise_floor;
7062 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
7063 
7064 	hdr = (struct ieee80211_hdr *)skb->data;
7065 	fc = le16_to_cpu(hdr->frame_control);
7066 
7067 	/* Firmware is guaranteed to report all essential management frames via
7068 	 * WMI while it can deliver some extra via HTT. Since there can be
7069 	 * duplicates, split the reporting wrt monitor/sniffing.
7070 	 */
7071 	status->flag |= RX_FLAG_SKIP_MONITOR;
7072 
7073 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
7074 	 * including group privacy action frames.
7075 	 */
7076 	if (ieee80211_has_protected(hdr->frame_control)) {
7077 		status->flag |= RX_FLAG_DECRYPTED;
7078 
7079 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
7080 			status->flag |= RX_FLAG_IV_STRIPPED |
7081 					RX_FLAG_MMIC_STRIPPED;
7082 			hdr->frame_control = __cpu_to_le16(fc &
7083 					     ~IEEE80211_FCTL_PROTECTED);
7084 		}
7085 	}
7086 
7087 	if (ieee80211_is_beacon(hdr->frame_control))
7088 		ath12k_mac_handle_beacon(ar, skb);
7089 
7090 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7091 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
7092 		   skb, skb->len,
7093 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
7094 
7095 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7096 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
7097 		   status->freq, status->band, status->signal,
7098 		   status->rate_idx);
7099 
7100 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
7101 
7102 exit:
7103 	rcu_read_unlock();
7104 }
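
/*
 * Editor's note: in the PMF path above, firmware has already decrypted the
 * frame but leaves the Protected bit set; for non-robust management frames
 * the bit must be cleared before the skb reaches mac80211 so it is not
 * treated as still encrypted. A minimal sketch of that bit manipulation,
 * with a hypothetical helper name, is shown below.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_strip_protected(struct ieee80211_hdr *hdr)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/* clear IEEE80211_FCTL_PROTECTED on an already-decrypted frame */
	hdr->frame_control = cpu_to_le16(fc & ~IEEE80211_FCTL_PROTECTED);
}
#endif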
7105 
7106 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
7107 {
7108 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
7109 	struct ath12k *ar;
7110 
7111 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
7112 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
7113 		return;
7114 	}
7115 
7116 	rcu_read_lock();
7117 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
7118 	if (!ar) {
7119 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
7120 			    tx_compl_param.pdev_id);
7121 		goto exit;
7122 	}
7123 
7124 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
7125 				 le32_to_cpu(tx_compl_param.status));
7126 
7127 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7128 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
7129 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
7130 		   tx_compl_param.status);
7131 
7132 exit:
7133 	rcu_read_unlock();
7134 }
7135 
7136 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
7137 						  u32 vdev_id,
7138 						  enum ath12k_scan_state state)
7139 {
7140 	int i;
7141 	struct ath12k_pdev *pdev;
7142 	struct ath12k *ar;
7143 
7144 	for (i = 0; i < ab->num_radios; i++) {
7145 		pdev = rcu_dereference(ab->pdevs_active[i]);
7146 		if (pdev && pdev->ar) {
7147 			ar = pdev->ar;
7148 
7149 			spin_lock_bh(&ar->data_lock);
7150 			if (ar->scan.state == state &&
7151 			    ar->scan.arvif &&
7152 			    ar->scan.arvif->vdev_id == vdev_id) {
7153 				spin_unlock_bh(&ar->data_lock);
7154 				return ar;
7155 			}
7156 			spin_unlock_bh(&ar->data_lock);
7157 		}
7158 	}
7159 	return NULL;
7160 }
7161 
7162 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
7163 {
7164 	struct ath12k *ar;
7165 	struct wmi_scan_event scan_ev = {0};
7166 
7167 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
7168 		ath12k_warn(ab, "failed to extract scan event");
7169 		return;
7170 	}
7171 
7172 	rcu_read_lock();
7173 
7174 	/* In case the scan was cancelled, e.g. during interface teardown,
7175 	 * the interface will not be found among the active interfaces.
7176 	 * Rather, in such scenarios, iterate over the active pdevs to
7177 	 * search for the 'ar' whose scan is ABORTING and whose aborting
7178 	 * scan's vdev id matches this event's info.
7179 	 */
7180 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
7181 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
7182 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7183 						 ATH12K_SCAN_ABORTING);
7184 		if (!ar)
7185 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7186 							 ATH12K_SCAN_RUNNING);
7187 	} else {
7188 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
7189 	}
7190 
7191 	if (!ar) {
7192 		ath12k_warn(ab, "Received scan event for unknown vdev");
7193 		rcu_read_unlock();
7194 		return;
7195 	}
7196 
7197 	spin_lock_bh(&ar->data_lock);
7198 
7199 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7200 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7201 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
7202 						  le32_to_cpu(scan_ev.reason)),
7203 		   le32_to_cpu(scan_ev.event_type),
7204 		   le32_to_cpu(scan_ev.reason),
7205 		   le32_to_cpu(scan_ev.channel_freq),
7206 		   le32_to_cpu(scan_ev.scan_req_id),
7207 		   le32_to_cpu(scan_ev.scan_id),
7208 		   le32_to_cpu(scan_ev.vdev_id),
7209 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
7210 
7211 	switch (le32_to_cpu(scan_ev.event_type)) {
7212 	case WMI_SCAN_EVENT_STARTED:
7213 		ath12k_wmi_event_scan_started(ar);
7214 		break;
7215 	case WMI_SCAN_EVENT_COMPLETED:
7216 		ath12k_wmi_event_scan_completed(ar);
7217 		break;
7218 	case WMI_SCAN_EVENT_BSS_CHANNEL:
7219 		ath12k_wmi_event_scan_bss_chan(ar);
7220 		break;
7221 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
7222 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
7223 		break;
7224 	case WMI_SCAN_EVENT_START_FAILED:
7225 		ath12k_warn(ab, "received scan start failure event\n");
7226 		ath12k_wmi_event_scan_start_failed(ar);
7227 		break;
7228 	case WMI_SCAN_EVENT_DEQUEUED:
7229 		__ath12k_mac_scan_finish(ar);
7230 		break;
7231 	case WMI_SCAN_EVENT_PREEMPTED:
7232 	case WMI_SCAN_EVENT_RESTARTED:
7233 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7234 	default:
7235 		break;
7236 	}
7237 
7238 	spin_unlock_bh(&ar->data_lock);
7239 
7240 	rcu_read_unlock();
7241 }
7242 
7243 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
7244 {
7245 	struct wmi_peer_sta_kickout_arg arg = {};
7246 	struct ieee80211_sta *sta;
7247 	struct ath12k_peer *peer;
7248 	struct ath12k *ar;
7249 
7250 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7251 		ath12k_warn(ab, "failed to extract peer sta kickout event");
7252 		return;
7253 	}
7254 
7255 	rcu_read_lock();
7256 
7257 	spin_lock_bh(&ab->base_lock);
7258 
7259 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
7260 
7261 	if (!peer) {
7262 		ath12k_warn(ab, "peer not found %pM\n",
7263 			    arg.mac_addr);
7264 		goto exit;
7265 	}
7266 
7267 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
7268 	if (!ar) {
7269 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
7270 			    peer->vdev_id);
7271 		goto exit;
7272 	}
7273 
7274 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7275 					   arg.mac_addr, NULL);
7276 	if (!sta) {
7277 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
7278 			    arg.mac_addr);
7279 		goto exit;
7280 	}
7281 
7282 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
7283 		   arg.mac_addr);
7284 
7285 	ieee80211_report_low_ack(sta, 10);
7286 
7287 exit:
7288 	spin_unlock_bh(&ab->base_lock);
7289 	rcu_read_unlock();
7290 }
7291 
7292 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
7293 {
7294 	struct wmi_roam_event roam_ev = {};
7295 	struct ath12k *ar;
7296 	u32 vdev_id;
7297 	u8 roam_reason;
7298 
7299 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
7300 		ath12k_warn(ab, "failed to extract roam event");
7301 		return;
7302 	}
7303 
7304 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
7305 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
7306 				   WMI_ROAM_REASON_MASK);
7307 
7308 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7309 		   "wmi roam event vdev %u reason %d rssi %d\n",
7310 		   vdev_id, roam_reason, roam_ev.rssi);
7311 
7312 	rcu_read_lock();
7313 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7314 	if (!ar) {
7315 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
7316 		rcu_read_unlock();
7317 		return;
7318 	}
7319 
7320 	if (roam_reason >= WMI_ROAM_REASON_MAX)
7321 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
7322 			    roam_reason, vdev_id);
7323 
7324 	switch (roam_reason) {
7325 	case WMI_ROAM_REASON_BEACON_MISS:
7326 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
7327 		break;
7328 	case WMI_ROAM_REASON_BETTER_AP:
7329 	case WMI_ROAM_REASON_LOW_RSSI:
7330 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
7331 	case WMI_ROAM_REASON_HO_FAILED:
7332 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
7333 			    roam_reason, vdev_id);
7334 		break;
7335 	}
7336 
7337 	rcu_read_unlock();
7338 }
7339 
7340 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7341 {
7342 	struct wmi_chan_info_event ch_info_ev = {0};
7343 	struct ath12k *ar;
7344 	struct survey_info *survey;
7345 	int idx;
7346 	/* HW channel counters frequency value in hertz */
7347 	u32 cc_freq_hz = ab->cc_freq_hz;
7348 
7349 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
7350 		ath12k_warn(ab, "failed to extract chan info event");
7351 		return;
7352 	}
7353 
7354 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7355 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
7356 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
7357 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
7358 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
7359 		   ch_info_ev.mac_clk_mhz);
7360 
7361 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
7362 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
7363 		return;
7364 	}
7365 
7366 	rcu_read_lock();
7367 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
7368 	if (!ar) {
7369 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
7370 			    ch_info_ev.vdev_id);
7371 		rcu_read_unlock();
7372 		return;
7373 	}
7374 	spin_lock_bh(&ar->data_lock);
7375 
7376 	switch (ar->scan.state) {
7377 	case ATH12K_SCAN_IDLE:
7378 	case ATH12K_SCAN_STARTING:
7379 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
7380 		goto exit;
7381 	case ATH12K_SCAN_RUNNING:
7382 	case ATH12K_SCAN_ABORTING:
7383 		break;
7384 	}
7385 
7386 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
7387 	if (idx >= ARRAY_SIZE(ar->survey)) {
7388 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7389 			    ch_info_ev.freq, idx);
7390 		goto exit;
7391 	}
7392 
7393 	/* If the FW provides the MAC clock frequency in MHz, override the
7394 	 * initialized HW channel counter frequency value
7395 	 */
7396 	if (ch_info_ev.mac_clk_mhz)
7397 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
7398 
7399 	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7400 		survey = &ar->survey[idx];
7401 		memset(survey, 0, sizeof(*survey));
7402 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
7403 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7404 				 SURVEY_INFO_TIME_BUSY;
7405 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
7406 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
7407 					    cc_freq_hz);
7408 	}
7409 exit:
7410 	spin_unlock_bh(&ar->data_lock);
7411 	rcu_read_unlock();
7412 }
7413 
7414 static void
7415 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7416 {
7417 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7418 	struct survey_info *survey;
7419 	struct ath12k *ar;
7420 	u32 cc_freq_hz = ab->cc_freq_hz;
7421 	u64 busy, total, tx, rx, rx_bss;
7422 	int idx;
7423 
7424 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7425 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
7426 		return;
7427 	}
7428 
7429 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
7430 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
7431 
7432 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
7433 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
7434 
7435 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
7436 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
7437 
7438 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
7439 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
7440 
7441 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
7442 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
7443 
7444 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7445 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7446 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7447 		   bss_ch_info_ev.noise_floor, busy, total,
7448 		   tx, rx, rx_bss);
7449 
7450 	rcu_read_lock();
7451 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
7452 
7453 	if (!ar) {
7454 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7455 			    bss_ch_info_ev.pdev_id);
7456 		rcu_read_unlock();
7457 		return;
7458 	}
7459 
7460 	spin_lock_bh(&ar->data_lock);
7461 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
7462 	if (idx >= ARRAY_SIZE(ar->survey)) {
7463 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7464 			    bss_ch_info_ev.freq, idx);
7465 		goto exit;
7466 	}
7467 
7468 	survey = &ar->survey[idx];
7469 
7470 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
7471 	survey->time      = div_u64(total, cc_freq_hz);
7472 	survey->time_busy = div_u64(busy, cc_freq_hz);
7473 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
7474 	survey->time_tx   = div_u64(tx, cc_freq_hz);
7475 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
7476 			     SURVEY_INFO_TIME |
7477 			     SURVEY_INFO_TIME_BUSY |
7478 			     SURVEY_INFO_TIME_RX |
7479 			     SURVEY_INFO_TIME_TX);
7480 exit:
7481 	spin_unlock_bh(&ar->data_lock);
7482 	complete(&ar->bss_survey_done);
7483 
7484 	rcu_read_unlock();
7485 }
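
/*
 * Editor's note: firmware splits each 64-bit cycle counter into 32-bit
 * high/low halves, which the handler above reassembles before scaling by
 * the counter clock (cc_freq_hz) into survey time units. A minimal sketch
 * of both steps, with hypothetical helper names, is given below; e.g. with
 * cc_freq_hz = 140000000, 140000000 cycles correspond to one time unit.
 */
#if 0	/* illustrative sketch, not compiled */
static u64 example_counter64(__le32 high, __le32 low)
{
	/* combine little-endian high/low halves into one 64-bit count */
	return (u64)le32_to_cpu(high) << 32 | le32_to_cpu(low);
}

static u64 example_cycles_to_time(u64 cycles, u32 cc_freq_hz)
{
	/* convert raw cycles to time units using the counter clock */
	return div_u64(cycles, cc_freq_hz);
}
#endif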
7486 
7487 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
7488 						struct sk_buff *skb)
7489 {
7490 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
7491 	struct ath12k *ar;
7492 
7493 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
7494 		ath12k_warn(ab, "failed to extract install key compl event");
7495 		return;
7496 	}
7497 
7498 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7499 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
7500 		   install_key_compl.key_idx, install_key_compl.key_flags,
7501 		   install_key_compl.macaddr, install_key_compl.status);
7502 
7503 	rcu_read_lock();
7504 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
7505 	if (!ar) {
7506 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
7507 			    install_key_compl.vdev_id);
7508 		rcu_read_unlock();
7509 		return;
7510 	}
7511 
7512 	ar->install_key_status = 0;
7513 
7514 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
7515 		ath12k_warn(ab, "install key failed for %pM status %d\n",
7516 			    install_key_compl.macaddr, install_key_compl.status);
7517 		ar->install_key_status = install_key_compl.status;
7518 	}
7519 
7520 	complete(&ar->install_key_done);
7521 	rcu_read_unlock();
7522 }
7523 
7524 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
7525 					  u16 tag, u16 len,
7526 					  const void *ptr,
7527 					  void *data)
7528 {
7529 	const struct wmi_service_available_event *ev;
7530 	u32 *wmi_ext2_service_bitmap;
7531 	int i, j;
7532 	u16 expected_len;
7533 
7534 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
7535 	if (len < expected_len) {
7536 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
7537 			    len, tag);
7538 		return -EINVAL;
7539 	}
7540 
7541 	switch (tag) {
7542 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
7543 		ev = (struct wmi_service_available_event *)ptr;
7544 		for (i = 0, j = WMI_MAX_SERVICE;
7545 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
7546 		     i++) {
7547 			do {
7548 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
7549 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7550 					set_bit(j, ab->wmi_ab.svc_map);
7551 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7552 		}
7553 
7554 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7555 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
7556 			   ev->wmi_service_segment_bitmap[0],
7557 			   ev->wmi_service_segment_bitmap[1],
7558 			   ev->wmi_service_segment_bitmap[2],
7559 			   ev->wmi_service_segment_bitmap[3]);
7560 		break;
7561 	case WMI_TAG_ARRAY_UINT32:
7562 		wmi_ext2_service_bitmap = (u32 *)ptr;
7563 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
7564 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
7565 		     i++) {
7566 			do {
7567 				if (wmi_ext2_service_bitmap[i] &
7568 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7569 					set_bit(j, ab->wmi_ab.svc_map);
7570 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7571 		}
7572 
7573 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7574 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
7575 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7576 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7577 		break;
7578 	}
7579 	return 0;
7580 }
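
/*
 * Editor's note: each u32 segment of the service bitmap above carries
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 (i.e. 32) service bits, and the
 * do/while walks bit 'j' across segments starting from a base service id
 * (WMI_MAX_SERVICE or WMI_MAX_EXT_SERVICE). An equivalent flattened form
 * of that unpacking, with hypothetical names, is sketched below.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_unpack_bitmap(const u32 *segments, int n_segments,
				  unsigned int first, unsigned long *svc_map)
{
	unsigned int bit;

	/* bit N of the stream lives in segment N/32 at position N%32 */
	for (bit = 0; bit < n_segments * 32; bit++)
		if (segments[bit / 32] & BIT(bit % 32))
			set_bit(first + bit, svc_map);
}
#endif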
7581 
7582 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7583 {
7584 	int ret;
7585 
7586 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7587 				  ath12k_wmi_tlv_services_parser,
7588 				  NULL);
7589 	return ret;
7590 }
7591 
7592 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7593 {
7594 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7595 	struct ath12k *ar;
7596 
7597 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7598 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7599 		return;
7600 	}
7601 
7602 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7603 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
7604 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7605 
7606 	rcu_read_lock();
7607 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7608 
7609 	if (!ar) {
7610 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7611 			    peer_assoc_conf.vdev_id);
7612 		rcu_read_unlock();
7613 		return;
7614 	}
7615 
7616 	complete(&ar->peer_assoc_done);
7617 	rcu_read_unlock();
7618 }
7619 
7620 static void
7621 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
7622 			      struct ath12k_fw_stats *fw_stats,
7623 			      char *buf, u32 *length)
7624 {
7625 	const struct ath12k_fw_stats_vdev *vdev;
7626 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7627 	struct ath12k_link_vif *arvif;
7628 	u32 len = *length;
7629 	u8 *vif_macaddr;
7630 	int i;
7631 
7632 	len += scnprintf(buf + len, buf_len - len, "\n");
7633 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7634 			 "ath12k VDEV stats");
7635 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7636 			 "=================");
7637 
7638 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
7639 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
7640 		if (!arvif)
7641 			continue;
7642 		vif_macaddr = arvif->ahvif->vif->addr;
7643 
7644 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7645 				 "VDEV ID", vdev->vdev_id);
7646 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7647 				 "VDEV MAC address", vif_macaddr);
7648 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7649 				 "beacon snr", vdev->beacon_snr);
7650 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7651 				 "data snr", vdev->data_snr);
7652 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7653 				 "num rx frames", vdev->num_rx_frames);
7654 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7655 				 "num rts fail", vdev->num_rts_fail);
7656 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7657 				 "num rts success", vdev->num_rts_success);
7658 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7659 				 "num rx err", vdev->num_rx_err);
7660 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7661 				 "num rx discard", vdev->num_rx_discard);
7662 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7663 				 "num tx not acked", vdev->num_tx_not_acked);
7664 
7665 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7666 			len += scnprintf(buf + len, buf_len - len,
7667 					"%25s [%02d] %u\n",
7668 					"num tx frames", i,
7669 					vdev->num_tx_frames[i]);
7670 
7671 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7672 			len += scnprintf(buf + len, buf_len - len,
7673 					"%25s [%02d] %u\n",
7674 					"num tx frames retries", i,
7675 					vdev->num_tx_frames_retries[i]);
7676 
7677 		for (i = 0 ; i < WLAN_MAX_AC; i++)
7678 			len += scnprintf(buf + len, buf_len - len,
7679 					"%25s [%02d] %u\n",
7680 					"num tx frames failures", i,
7681 					vdev->num_tx_frames_failures[i]);
7682 
7683 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7684 			len += scnprintf(buf + len, buf_len - len,
7685 					"%25s [%02d] 0x%08x\n",
7686 					"tx rate history", i,
7687 					vdev->tx_rate_history[i]);
7688 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
7689 			len += scnprintf(buf + len, buf_len - len,
7690 					"%25s [%02d] %u\n",
7691 					"beacon rssi history", i,
7692 					vdev->beacon_rssi_history[i]);
7693 
7694 		len += scnprintf(buf + len, buf_len - len, "\n");
7695 		*length = len;
7696 	}
7697 }
7698 
7699 static void
7700 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7701 			     struct ath12k_fw_stats *fw_stats,
7702 			     char *buf, u32 *length)
7703 {
7704 	const struct ath12k_fw_stats_bcn *bcn;
7705 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7706 	struct ath12k_link_vif *arvif;
7707 	u32 len = *length;
7708 	size_t num_bcn;
7709 
7710 	num_bcn = list_count_nodes(&fw_stats->bcn);
7711 
7712 	len += scnprintf(buf + len, buf_len - len, "\n");
7713 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7714 			 "ath12k Beacon stats", num_bcn);
7715 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7716 			 "===================");
7717 
7718 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7719 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7720 		if (!arvif)
7721 			continue;
7722 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7723 				 "VDEV ID", bcn->vdev_id);
7724 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7725 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7726 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7727 				 "================");
7728 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7729 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7730 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7731 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7732 
7733 		len += scnprintf(buf + len, buf_len - len, "\n");
7734 		*length = len;
7735 	}
7736 }
7737 
7738 static void
7739 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7740 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7741 {
7742 	u32 len = *length;
7743 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7744 
7745 	len += scnprintf(buf + len, buf_len - len, "\n");
7746 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7747 			"ath12k PDEV stats");
7748 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7749 			"=================");
7750 
7751 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7752 			"Channel noise floor", pdev->ch_noise_floor);
7753 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7754 			"Channel TX power", pdev->chan_tx_power);
7755 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7756 			"TX frame count", pdev->tx_frame_count);
7757 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7758 			"RX frame count", pdev->rx_frame_count);
7759 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7760 			"RX clear count", pdev->rx_clear_count);
7761 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7762 			"Cycle count", pdev->cycle_count);
7763 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7764 			"PHY error count", pdev->phy_err_count);
7765 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7766 			"soc drop count", fw_soc_drop_cnt);
7767 
7768 	*length = len;
7769 }
7770 
7771 static void
7772 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7773 				 char *buf, u32 *length)
7774 {
7775 	u32 len = *length;
7776 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7777 
7778 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7779 			 "ath12k PDEV TX stats");
7780 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7781 			 "====================");
7782 
7783 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7784 			 "HTT cookies queued", pdev->comp_queued);
7785 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7786 			 "HTT cookies disp.", pdev->comp_delivered);
7787 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7788 			 "MSDU queued", pdev->msdu_enqued);
7789 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7790 			 "MPDU queued", pdev->mpdu_enqued);
7791 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7792 			 "MSDUs dropped", pdev->wmm_drop);
7793 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7794 			 "Local enqued", pdev->local_enqued);
7795 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7796 			 "Local freed", pdev->local_freed);
7797 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7798 			 "HW queued", pdev->hw_queued);
7799 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7800 			 "PPDUs reaped", pdev->hw_reaped);
7801 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7802 			 "Num underruns", pdev->underrun);
7803 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7804 			 "PPDUs cleaned", pdev->tx_abort);
7805 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7806 			 "MPDUs requeued", pdev->mpdus_requed);
7807 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7808 			 "Excessive retries", pdev->tx_ko);
7809 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7810 			 "HW rate", pdev->data_rc);
7811 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7812 			 "Sched self triggers", pdev->self_triggers);
7813 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7814 			 "Dropped due to SW retries",
7815 			 pdev->sw_retry_failure);
7816 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7817 			 "Illegal rate phy errors",
7818 			 pdev->illgl_rate_phy_err);
7819 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7820 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7821 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7822 			 "TX timeout", pdev->pdev_tx_timeout);
7823 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7824 			 "PDEV resets", pdev->pdev_resets);
7825 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7826 			 "Stateless TIDs alloc failures",
7827 			 pdev->stateless_tid_alloc_failure);
7828 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7829 			 "PHY underrun", pdev->phy_underrun);
7830 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7831 			 "MPDU is more than txop limit", pdev->txop_ovf);
7832 	*length = len;
7833 }
7834 
7835 static void
7836 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7837 				 char *buf, u32 *length)
7838 {
7839 	u32 len = *length;
7840 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7841 
7842 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7843 			 "ath12k PDEV RX stats");
7844 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7845 			 "====================");
7846 
7847 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7848 			 "Mid PPDU route change",
7849 			 pdev->mid_ppdu_route_change);
7850 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7851 			 "Tot. number of statuses", pdev->status_rcvd);
7852 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7853 			 "Extra frags on rings 0", pdev->r0_frags);
7854 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7855 			 "Extra frags on rings 1", pdev->r1_frags);
7856 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7857 			 "Extra frags on rings 2", pdev->r2_frags);
7858 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7859 			 "Extra frags on rings 3", pdev->r3_frags);
7860 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7861 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7862 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7863 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7864 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7865 			 "MSDUs delivered to stack", pdev->loc_msdus);
7866 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7867 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7868 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7869 			 "Oversized AMSUs", pdev->oversize_amsdu);
7870 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7871 			 "PHY errors", pdev->phy_errs);
7872 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7873 			 "PHY errors drops", pdev->phy_err_drop);
7874 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7875 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7876 	*length = len;
7877 }
7878 
7879 static void
7880 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7881 			      struct ath12k_fw_stats *fw_stats,
7882 			      char *buf, u32 *length)
7883 {
7884 	const struct ath12k_fw_stats_pdev *pdev;
7885 	u32 len = *length;
7886 
7887 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7888 					struct ath12k_fw_stats_pdev, list);
7889 	if (!pdev) {
7890 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7891 		return;
7892 	}
7893 
7894 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7895 					   ar->ab->fw_soc_drop_count);
7896 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7897 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7898 
7899 	*length = len;
7900 }
7901 
7902 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7903 			      struct ath12k_fw_stats *fw_stats,
7904 			      u32 stats_id, char *buf)
7905 {
7906 	u32 len = 0;
7907 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7908 
7909 	spin_lock_bh(&ar->data_lock);
7910 
7911 	switch (stats_id) {
7912 	case WMI_REQUEST_VDEV_STAT:
7913 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7914 		break;
7915 	case WMI_REQUEST_BCN_STAT:
7916 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7917 		break;
7918 	case WMI_REQUEST_PDEV_STAT:
7919 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7920 		break;
7921 	default:
7922 		break;
7923 	}
7924 
7925 	spin_unlock_bh(&ar->data_lock);
7926 
7927 	if (len >= buf_len)
7928 		buf[len - 1] = 0;
7929 	else
7930 		buf[len] = 0;
7931 
7932 	ath12k_fw_stats_reset(ar);
7933 }
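
/*
 * Editor's note: the dump helpers above all rely on the same accumulation
 * pattern: scnprintf() returns the number of bytes actually written
 * (excluding the NUL) and clamps to the given size, so appending with
 * "buf + len, buf_len - len" can never overrun the buffer even when the
 * stats overflow it. A minimal sketch of the pattern is shown below;
 * the helper name and values are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_append_stats(char *buf, u32 buf_len)
{
	u32 len = 0;

	/* each call writes at most buf_len - len - 1 characters */
	len += scnprintf(buf + len, buf_len - len, "%30s\n", "example stats");
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "some counter", 42U);
}
#endif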
7934 
7935 static void
7936 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7937 			   struct ath12k_fw_stats_vdev *dst)
7938 {
7939 	int i;
7940 
7941 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7942 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7943 	dst->data_snr = le32_to_cpu(src->data_snr);
7944 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7945 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7946 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7947 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7948 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7949 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7950 
7951 	for (i = 0; i < WLAN_MAX_AC; i++)
7952 		dst->num_tx_frames[i] =
7953 			le32_to_cpu(src->num_tx_frames[i]);
7954 
7955 	for (i = 0; i < WLAN_MAX_AC; i++)
7956 		dst->num_tx_frames_retries[i] =
7957 			le32_to_cpu(src->num_tx_frames_retries[i]);
7958 
7959 	for (i = 0; i < WLAN_MAX_AC; i++)
7960 		dst->num_tx_frames_failures[i] =
7961 			le32_to_cpu(src->num_tx_frames_failures[i]);
7962 
7963 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7964 		dst->tx_rate_history[i] =
7965 			le32_to_cpu(src->tx_rate_history[i]);
7966 
7967 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7968 		dst->beacon_rssi_history[i] =
7969 			le32_to_cpu(src->beacon_rssi_history[i]);
7970 }
7971 
7972 static void
7973 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7974 			  struct ath12k_fw_stats_bcn *dst)
7975 {
7976 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7977 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7978 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7979 }
7980 
7981 static void
7982 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7983 				struct ath12k_fw_stats_pdev *dst)
7984 {
7985 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7986 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7987 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7988 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7989 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7990 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7991 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7992 }
7993 
7994 static void
7995 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
7996 			      struct ath12k_fw_stats_pdev *dst)
7997 {
7998 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
7999 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
8000 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
8001 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
8002 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
8003 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
8004 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
8005 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
8006 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
8007 	dst->underrun = a_sle32_to_cpu(src->underrun);
8008 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
8009 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
8010 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
8011 	dst->data_rc = __le32_to_cpu(src->data_rc);
8012 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
8013 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
8014 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
8015 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
8016 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
8017 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
8018 	dst->stateless_tid_alloc_failure =
8019 		__le32_to_cpu(src->stateless_tid_alloc_failure);
8020 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
8021 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
8022 }
8023 
8024 static void
8025 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
8026 			      struct ath12k_fw_stats_pdev *dst)
8027 {
8028 	dst->mid_ppdu_route_change =
8029 		a_sle32_to_cpu(src->mid_ppdu_route_change);
8030 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
8031 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
8032 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
8033 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
8034 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
8035 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
8036 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
8037 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
8038 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
8039 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
8040 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
8041 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
8042 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
8043 }
8044 
8045 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
8046 					      struct wmi_tlv_fw_stats_parse *parse,
8047 					      const void *ptr,
8048 					      u16 len)
8049 {
8050 	const struct wmi_stats_event *ev = parse->ev;
8051 	struct ath12k_fw_stats *stats = parse->stats;
8052 	struct ath12k *ar;
8053 	struct ath12k_link_vif *arvif;
8054 	struct ieee80211_sta *sta;
8055 	struct ath12k_sta *ahsta;
8056 	struct ath12k_link_sta *arsta;
8057 	int i, ret = 0;
8058 	const void *data = ptr;
8059 
8060 	if (!ev) {
8061 		ath12k_warn(ab, "failed to fetch update stats ev");
8062 		return -EPROTO;
8063 	}
8064 
8065 	if (!stats)
8066 		return -EINVAL;
8067 
8068 	rcu_read_lock();
8069 
8070 	stats->pdev_id = le32_to_cpu(ev->pdev_id);
8071 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
8072 	if (!ar) {
8073 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
8074 			    le32_to_cpu(ev->pdev_id));
8075 		ret = -EPROTO;
8076 		goto exit;
8077 	}
8078 
8079 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
8080 		const struct wmi_vdev_stats_params *src;
8081 		struct ath12k_fw_stats_vdev *dst;
8082 
8083 		src = data;
8084 		if (len < sizeof(*src)) {
8085 			ret = -EPROTO;
8086 			goto exit;
8087 		}
8088 
8089 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
8090 		if (arvif) {
8091 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
8092 							   arvif->bssid,
8093 							   NULL);
8094 			if (sta) {
8095 				ahsta = ath12k_sta_to_ahsta(sta);
8096 				arsta = &ahsta->deflink;
8097 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
8098 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8099 					   "wmi stats vdev id %d snr %d\n",
8100 					   src->vdev_id, src->beacon_snr);
8101 			} else {
8102 				ath12k_dbg(ab, ATH12K_DBG_WMI,
8103 					   "not found station bssid %pM for vdev stat\n",
8104 					   arvif->bssid);
8105 			}
8106 		}
8107 
8108 		data += sizeof(*src);
8109 		len -= sizeof(*src);
8110 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8111 		if (!dst)
8112 			continue;
8113 		ath12k_wmi_pull_vdev_stats(src, dst);
8114 		stats->stats_id = WMI_REQUEST_VDEV_STAT;
8115 		list_add_tail(&dst->list, &stats->vdevs);
8116 	}
8117 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
8118 		const struct ath12k_wmi_bcn_stats_params *src;
8119 		struct ath12k_fw_stats_bcn *dst;
8120 
8121 		src = data;
8122 		if (len < sizeof(*src)) {
8123 			ret = -EPROTO;
8124 			goto exit;
8125 		}
8126 
8127 		data += sizeof(*src);
8128 		len -= sizeof(*src);
8129 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8130 		if (!dst)
8131 			continue;
8132 		ath12k_wmi_pull_bcn_stats(src, dst);
8133 		stats->stats_id = WMI_REQUEST_BCN_STAT;
8134 		list_add_tail(&dst->list, &stats->bcn);
8135 	}
8136 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
8137 		const struct ath12k_wmi_pdev_stats_params *src;
8138 		struct ath12k_fw_stats_pdev *dst;
8139 
8140 		src = data;
8141 		if (len < sizeof(*src)) {
8142 			ret = -EPROTO;
8143 			goto exit;
8144 		}
8145 
8146 		stats->stats_id = WMI_REQUEST_PDEV_STAT;
8147 
8148 		data += sizeof(*src);
8149 		len -= sizeof(*src);
8150 
8151 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
8152 		if (!dst)
8153 			continue;
8154 
8155 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
8156 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
8157 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
8158 		list_add_tail(&dst->list, &stats->pdevs);
8159 	}
8160 
8161 exit:
8162 	rcu_read_unlock();
8163 	return ret;
8164 }
8165 
8166 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
8167 					 u16 tag, u16 len,
8168 					 const void *ptr, void *data)
8169 {
8170 	struct wmi_tlv_fw_stats_parse *parse = data;
8171 	int ret = 0;
8172 
8173 	switch (tag) {
8174 	case WMI_TAG_STATS_EVENT:
8175 		parse->ev = ptr;
8176 		break;
8177 	case WMI_TAG_ARRAY_BYTE:
8178 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
8179 		break;
8180 	default:
8181 		break;
8182 	}
8183 	return ret;
8184 }
8185 
8186 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
8187 				    struct ath12k_fw_stats *stats)
8188 {
8189 	struct wmi_tlv_fw_stats_parse parse = {};
8190 
8191 	stats->stats_id = 0;
8192 	parse.stats = stats;
8193 
8194 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8195 				   ath12k_wmi_tlv_fw_stats_parse,
8196 				   &parse);
8197 }
8198 
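/* VDEV and beacon stats may be spread across several WMI updates, so
 * accumulate them in ar->fw_stats and only complete fw_stats_done once
 * the number of received records matches the number of started VDEVs.
 */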
8199 static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
8200 					struct ath12k_fw_stats *stats)
8201 {
8202 	struct ath12k_base *ab = ar->ab;
8203 	struct ath12k_pdev *pdev;
8204 	bool is_end = true;
8205 	size_t total_vdevs_started = 0;
8206 	int i;
8207 
8208 	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
8209 		if (list_empty(&stats->vdevs)) {
8210 			ath12k_warn(ab, "empty vdev stats");
8211 			return;
8212 		}
8213 		/* FW sends stats for all active VDEVs irrespective of PDEV;
8214 		 * mark the end only once stats for all started VDEVs arrive
8215 		 */
8216 		rcu_read_lock();
8217 		for (i = 0; i < ab->num_radios; i++) {
8218 			pdev = rcu_dereference(ab->pdevs_active[i]);
8219 			if (pdev && pdev->ar)
8220 				total_vdevs_started += pdev->ar->num_started_vdevs;
8221 		}
8222 		rcu_read_unlock();
8223 
8224 		if (total_vdevs_started)
8225 			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
8226 				  total_vdevs_started);
8227 
8228 		list_splice_tail_init(&stats->vdevs,
8229 				      &ar->fw_stats.vdevs);
8230 
8231 		if (is_end)
8232 			complete(&ar->fw_stats_done);
8233 
8234 		return;
8235 	}
8236 
8237 	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
8238 		if (list_empty(&stats->bcn)) {
8239 			ath12k_warn(ab, "empty beacon stats");
8240 			return;
8241 		}
8242 		/* Mark the end once beacon stats for all started VDEVs
8243 		 * within the PDEV have been received
8244 		 */
8245 		if (ar->num_started_vdevs)
8246 			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
8247 				  ar->num_started_vdevs);
8248 
8249 		list_splice_tail_init(&stats->bcn,
8250 				      &ar->fw_stats.bcn);
8251 
8252 		if (is_end)
8253 			complete(&ar->fw_stats_done);
8254 	}
8255 }
8256 
8257 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
8258 {
8259 	struct ath12k_fw_stats stats = {};
8260 	struct ath12k *ar;
8261 	int ret;
8262 
8263 	INIT_LIST_HEAD(&stats.pdevs);
8264 	INIT_LIST_HEAD(&stats.vdevs);
8265 	INIT_LIST_HEAD(&stats.bcn);
8266 
8267 	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
8268 	if (ret) {
8269 		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
8270 		goto free;
8271 	}
8272 
8273 	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
8274 
8275 	rcu_read_lock();
8276 	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
8277 	if (!ar) {
8278 		rcu_read_unlock();
8279 		ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
8280 			    stats.pdev_id);
8281 		goto free;
8282 	}
8283 
8284 	spin_lock_bh(&ar->data_lock);
8285 
8286 	/* Handle WMI_REQUEST_PDEV_STAT status update */
8287 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
8288 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
8289 		complete(&ar->fw_stats_done);
8290 		goto complete;
8291 	}
8292 
8293 	/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
8294 	ath12k_wmi_fw_stats_process(ar, &stats);
8295 
8296 complete:
8297 	complete(&ar->fw_stats_complete);
8298 	spin_unlock_bh(&ar->data_lock);
8299 	rcu_read_unlock();
8300 
8301 	/* The pdev, vdev and beacon lists in stats have been spliced and
8302 	 * reinitialised at this point, so there is no need to free them.
8303 	 */
8304 	return;
8305 
8306 free:
8307 	ath12k_fw_stats_free(&stats);
8308 }
8309 
8310 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
8311  * is not part of BDF CTL (Conformance Test Limits) table entries.
8312  */
8313 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8314 						 struct sk_buff *skb)
8315 {
8316 	const void **tb;
8317 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8318 	int ret;
8319 
8320 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8321 	if (IS_ERR(tb)) {
8322 		ret = PTR_ERR(tb);
8323 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8324 		return;
8325 	}
8326 
8327 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8328 	if (!ev) {
8329 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8330 		kfree(tb);
8331 		return;
8332 	}
8333 
8334 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8335 		   "pdev ctl failsafe check ev status %d\n",
8336 		   ev->ctl_failsafe_status);
8337 
8338 	/* If ctl_failsafe_status is set to 1, FW caps the transmit power
8339 	 * at 10 dBm; otherwise the CTL power entry from the BDF is used.
8340 	 */
8341 	if (ev->ctl_failsafe_status != 0)
8342 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
8343 			    ev->ctl_failsafe_status);
8344 
8345 	kfree(tb);
8346 }
8347 
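/* Keep mac80211's CSA countdown in sync with the firmware's beacon
 * countdown and finalise the channel switch once the counter hits zero.
 */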
8348 static void
8349 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8350 					  const struct ath12k_wmi_pdev_csa_event *ev,
8351 					  const u32 *vdev_ids)
8352 {
8353 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8354 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8355 	struct ieee80211_bss_conf *conf;
8356 	struct ath12k_link_vif *arvif;
8357 	struct ath12k_vif *ahvif;
8358 	int i;
8359 
8360 	rcu_read_lock();
8361 	for (i = 0; i < num_vdevs; i++) {
8362 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8363 
8364 		if (!arvif) {
8365 			ath12k_warn(ab, "received CSA status for unknown vdev %d",
8366 				    vdev_ids[i]);
8367 			continue;
8368 		}
8369 		ahvif = arvif->ahvif;
8370 
8371 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
8372 			ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
8373 				    arvif->link_id);
8374 			continue;
8375 		}
8376 
8377 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8378 		if (!conf) {
8379 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8380 				    ahvif->vif->addr, arvif->link_id);
8381 			continue;
8382 		}
8383 
8384 		if (!arvif->is_up || !conf->csa_active)
8385 			continue;
8386 
8387 		/* Finish CSA when counter reaches zero */
8388 		if (!current_switch_count) {
8389 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8390 			arvif->current_cntdown_counter = 0;
8391 		} else if (current_switch_count > 1) {
8392 			/* If the count in the event is not what we expect, don't
8393 			 * update the mac80211 count: on beacon Tx failure the
8394 			 * firmware does not decrement its counter, so this event
8395 			 * can carry the previous count value again
8396 			 */
8397 			if (current_switch_count != arvif->current_cntdown_counter)
8398 				continue;
8399 
8400 			arvif->current_cntdown_counter =
8401 				ieee80211_beacon_update_cntdwn(ahvif->vif,
8402 							       arvif->link_id);
8403 		}
8404 	}
8405 	rcu_read_unlock();
8406 }
8407 
8408 static void
8409 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
8410 					      struct sk_buff *skb)
8411 {
8412 	const void **tb;
8413 	const struct ath12k_wmi_pdev_csa_event *ev;
8414 	const u32 *vdev_ids;
8415 	int ret;
8416 
8417 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8418 	if (IS_ERR(tb)) {
8419 		ret = PTR_ERR(tb);
8420 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8421 		return;
8422 	}
8423 
8424 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
8425 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
8426 
8427 	if (!ev || !vdev_ids) {
8428 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
8429 		kfree(tb);
8430 		return;
8431 	}
8432 
8433 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8434 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
8435 		   ev->current_switch_count, ev->pdev_id,
8436 		   ev->num_vdevs);
8437 
8438 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
8439 
8440 	kfree(tb);
8441 }
8442 
8443 static void
8444 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
8445 {
8446 	const void **tb;
8447 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
8448 	const struct ath12k_wmi_pdev_radar_event *ev;
8449 	struct ath12k *ar;
8450 	int ret;
8451 
8452 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8453 	if (IS_ERR(tb)) {
8454 		ret = PTR_ERR(tb);
8455 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8456 		return;
8457 	}
8458 
8459 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
8460 
8461 	if (!ev) {
8462 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
8463 		kfree(tb);
8464 		return;
8465 	}
8466 
8467 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8468 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
8469 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
8470 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
8471 		   ev->freq_offset, ev->sidx);
8472 
8473 	rcu_read_lock();
8474 
8475 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
8476 
8477 	if (!ar) {
8478 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
8479 			    ev->pdev_id);
8480 			    le32_to_cpu(ev->pdev_id));
8481 	}
8482 
8483 	arg.ar = ar;
8484 	arg.chanctx_conf = NULL;
8485 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
8486 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
8487 	if (!arg.chanctx_conf) {
8488 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
8489 		goto exit;
8490 	}
8491 
8492 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
8493 		   ev->pdev_id);
8494 
8495 	if (ar->dfs_block_radar_events)
8496 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
8497 	else
8498 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
8499 
8500 exit:
8501 	rcu_read_unlock();
8502 
8503 	kfree(tb);
8504 }
8505 
8506 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
8507 					  struct sk_buff *skb)
8508 {
8509 	const struct ath12k_wmi_ftm_event *ev;
8510 	const void **tb;
8511 	int ret;
8512 	u16 length;
8513 
8514 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8515 
8516 	if (IS_ERR(tb)) {
8517 		ret = PTR_ERR(tb);
8518 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
8519 		return;
8520 	}
8521 
8522 	ev = tb[WMI_TAG_ARRAY_BYTE];
8523 	if (!ev) {
8524 		ath12k_warn(ab, "failed to fetch ftm msg\n");
8525 		kfree(tb);
8526 		return;
8527 	}
8528 
8529 	length = skb->len - TLV_HDR_SIZE;
8530 	ath12k_tm_process_event(ab, cmd_id, ev, length);
8531 	kfree(tb);
8532 	tb = NULL;
8533 }
8534 
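/* The temperature reading is currently only validated against a known
 * pdev and logged; no thermal state is updated in this handler.
 */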
8535 static void
8536 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
8537 				  struct sk_buff *skb)
8538 {
8539 	struct ath12k *ar;
8540 	struct wmi_pdev_temperature_event ev = {0};
8541 
8542 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
8543 		ath12k_warn(ab, "failed to extract pdev temperature event");
8544 		return;
8545 	}
8546 
8547 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8548 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
8549 
8550 	rcu_read_lock();
8551 
8552 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
8553 	if (!ar) {
8554 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
8555 		goto exit;
8556 	}
8557 
8558 exit:
8559 	rcu_read_unlock();
8560 }
8561 
8562 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
8563 					struct sk_buff *skb)
8564 {
8565 	const void **tb;
8566 	const struct wmi_fils_discovery_event *ev;
8567 	int ret;
8568 
8569 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8570 	if (IS_ERR(tb)) {
8571 		ret = PTR_ERR(tb);
8572 		ath12k_warn(ab,
8573 			    "failed to parse FILS discovery event tlv %d\n",
8574 			    ret);
8575 		return;
8576 	}
8577 
8578 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
8579 	if (!ev) {
8580 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
8581 		kfree(tb);
8582 		return;
8583 	}
8584 
8585 	ath12k_warn(ab,
8586 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
8587 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
8588 
8589 	kfree(tb);
8590 }
8591 
8592 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
8593 					      struct sk_buff *skb)
8594 {
8595 	const void **tb;
8596 	const struct wmi_probe_resp_tx_status_event *ev;
8597 	int ret;
8598 
8599 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8600 	if (IS_ERR(tb)) {
8601 		ret = PTR_ERR(tb);
8602 		ath12k_warn(ab,
8603 			    "failed to parse probe response transmission status event tlv: %d\n",
8604 			    ret);
8605 		return;
8606 	}
8607 
8608 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
8609 	if (!ev) {
8610 		ath12k_warn(ab,
8611 			    "failed to fetch probe response transmission status event");
8612 		kfree(tb);
8613 		return;
8614 	}
8615 
8616 	if (ev->tx_status)
8617 		ath12k_warn(ab,
8618 			    "Probe response transmission failed for vdev_id %u, status %u\n",
8619 			    ev->vdev_id, ev->tx_status);
8620 
8621 	kfree(tb);
8622 }
8623 
8624 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
8625 				    struct sk_buff *skb)
8626 {
8627 	const void **tb;
8628 	const struct wmi_p2p_noa_event *ev;
8629 	const struct ath12k_wmi_p2p_noa_info *noa;
8630 	struct ath12k *ar;
8631 	int ret, vdev_id;
8632 
8633 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8634 	if (IS_ERR(tb)) {
8635 		ret = PTR_ERR(tb);
8636 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
8637 		return ret;
8638 	}
8639 
8640 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
8641 	noa = tb[WMI_TAG_P2P_NOA_INFO];
8642 
8643 	if (!ev || !noa) {
8644 		ret = -EPROTO;
8645 		goto out;
8646 	}
8647 
8648 	vdev_id = __le32_to_cpu(ev->vdev_id);
8649 
8650 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8651 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
8652 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
8653 
8654 	rcu_read_lock();
8655 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
8656 	if (!ar) {
8657 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
8658 			    vdev_id);
8659 		ret = -EINVAL;
8660 		goto unlock;
8661 	}
8662 
8663 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
8664 
8665 	ret = 0;
8666 
8667 unlock:
8668 	rcu_read_unlock();
8669 out:
8670 	kfree(tb);
8671 	return ret;
8672 }
8673 
8674 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
8675 					     struct sk_buff *skb)
8676 {
8677 	const struct wmi_rfkill_state_change_event *ev;
8678 	const void **tb;
8679 	int ret;
8680 
8681 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8682 	if (IS_ERR(tb)) {
8683 		ret = PTR_ERR(tb);
8684 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8685 		return;
8686 	}
8687 
8688 	ev = tb[WMI_TAG_RFKILL_EVENT];
8689 	if (!ev) {
8690 		kfree(tb);
8691 		return;
8692 	}
8693 
8694 	ath12k_dbg(ab, ATH12K_DBG_MAC,
8695 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
8696 		   le32_to_cpu(ev->gpio_pin_num),
8697 		   le32_to_cpu(ev->int_type),
8698 		   le32_to_cpu(ev->radio_state));
8699 
8700 	spin_lock_bh(&ab->base_lock);
8701 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
8702 	spin_unlock_bh(&ab->base_lock);
8703 
8704 	queue_work(ab->workqueue, &ab->rfkill_work);
8705 	kfree(tb);
8706 }
8707 
8708 static void
8709 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
8710 {
8711 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
8712 }
8713 
8714 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
8715 					struct sk_buff *skb)
8716 {
8717 	const void **tb;
8718 	const struct wmi_twt_enable_event *ev;
8719 	int ret;
8720 
8721 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8722 	if (IS_ERR(tb)) {
8723 		ret = PTR_ERR(tb);
8724 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
8725 			    ret);
8726 		return;
8727 	}
8728 
8729 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
8730 	if (!ev) {
8731 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
8732 		goto exit;
8733 	}
8734 
8735 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
8736 		   le32_to_cpu(ev->pdev_id),
8737 		   le32_to_cpu(ev->status));
8738 
8739 exit:
8740 	kfree(tb);
8741 }
8742 
8743 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
8744 					 struct sk_buff *skb)
8745 {
8746 	const void **tb;
8747 	const struct wmi_twt_disable_event *ev;
8748 	int ret;
8749 
8750 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8751 	if (IS_ERR(tb)) {
8752 		ret = PTR_ERR(tb);
8753 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
8754 			    ret);
8755 		return;
8756 	}
8757 
8758 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
8759 	if (!ev) {
8760 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
8761 		goto exit;
8762 	}
8763 
8764 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
8765 		   le32_to_cpu(ev->pdev_id),
8766 		   le32_to_cpu(ev->status));
8767 
8768 exit:
8769 	kfree(tb);
8770 }
8771 
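/* For WOW_REASON_PAGE_FAULT wakeups the WMI_TAG_ARRAY_BYTE TLV is
 * expected to hold a length-prefixed dump of the packet that triggered
 * the page fault; the TLV is ignored for every other wake reason.
 */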
8772 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
8773 					    u16 tag, u16 len,
8774 					    const void *ptr, void *data)
8775 {
8776 	const struct wmi_wow_ev_pg_fault_param *pf_param;
8777 	const struct wmi_wow_ev_param *param;
8778 	struct wmi_wow_ev_arg *arg = data;
8779 	int pf_len;
8780 
8781 	switch (tag) {
8782 	case WMI_TAG_WOW_EVENT_INFO:
8783 		param = ptr;
8784 		arg->wake_reason = le32_to_cpu(param->wake_reason);
8785 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
8786 			   arg->wake_reason, wow_reason(arg->wake_reason));
8787 		break;
8788 
8789 	case WMI_TAG_ARRAY_BYTE:
8790 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
8791 			pf_param = ptr;
8792 			pf_len = le32_to_cpu(pf_param->len);
8793 			if (pf_len > len - sizeof(pf_len) ||
8794 			    pf_len < 0) {
8795 				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
8796 					    pf_len);
8797 				return -EINVAL;
8798 			}
8799 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
8800 				   pf_len);
8801 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
8802 					"wow_reason_page_fault packet present",
8803 					"wow_pg_fault ",
8804 					pf_param->data,
8805 					pf_len);
8806 		}
8807 		break;
8808 	default:
8809 		break;
8810 	}
8811 
8812 	return 0;
8813 }
8814 
8815 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
8816 {
8817 	struct wmi_wow_ev_arg arg = { };
8818 	int ret;
8819 
8820 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8821 				  ath12k_wmi_wow_wakeup_host_parse,
8822 				  &arg);
8823 	if (ret) {
8824 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8825 			    ret);
8826 		return;
8827 	}
8828 
8829 	complete(&ab->wow.wakeup_completed);
8830 }
8831 
8832 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8833 						struct sk_buff *skb)
8834 {
8835 	const struct wmi_gtk_offload_status_event *ev;
8836 	struct ath12k_link_vif *arvif;
8837 	__be64 replay_ctr_be;
8838 	u64 replay_ctr;
8839 	const void **tb;
8840 	int ret;
8841 
8842 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8843 	if (IS_ERR(tb)) {
8844 		ret = PTR_ERR(tb);
8845 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8846 		return;
8847 	}
8848 
8849 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8850 	if (!ev) {
8851 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8852 		kfree(tb);
8853 		return;
8854 	}
8855 
8856 	rcu_read_lock();
8857 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8858 	if (!arvif) {
8859 		rcu_read_unlock();
8860 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8861 			    le32_to_cpu(ev->vdev_id));
8862 		kfree(tb);
8863 		return;
8864 	}
8865 
8866 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8867 	arvif->rekey_data.replay_ctr = replay_ctr;
8868 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8869 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8870 
8871 	/* supplicant expects big-endian replay counter */
8872 	replay_ctr_be = cpu_to_be64(replay_ctr);
8873 
8874 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8875 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8876 
8877 	rcu_read_unlock();
8878 
8879 	kfree(tb);
8880 }
8881 
8882 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8883 						struct sk_buff *skb)
8884 {
8885 	const struct wmi_mlo_setup_complete_event *ev;
8886 	struct ath12k *ar = NULL;
8887 	struct ath12k_pdev *pdev;
8888 	const void **tb;
8889 	int ret, i;
8890 
8891 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8892 	if (IS_ERR(tb)) {
8893 		ret = PTR_ERR(tb);
8894 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8895 			    ret);
8896 		return;
8897 	}
8898 
8899 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8900 	if (!ev) {
8901 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8902 		kfree(tb);
8903 		return;
8904 	}
8905 
8906 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8907 		goto skip_lookup;
8908 
8909 	for (i = 0; i < ab->num_radios; i++) {
8910 		pdev = &ab->pdevs[i];
8911 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8912 			ar = pdev->ar;
8913 			break;
8914 		}
8915 	}
8916 
8917 skip_lookup:
8918 	if (!ar) {
8919 		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
8920 			    ev->pdev_id, ev->status);
8921 			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
8922 	}
8923 
8924 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8925 	complete(&ar->mlo_setup_done);
8926 
8927 out:
8928 	kfree(tb);
8929 }
8930 
8931 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8932 					       struct sk_buff *skb)
8933 {
8934 	const struct wmi_mlo_teardown_complete_event *ev;
8935 	const void **tb;
8936 	int ret;
8937 
8938 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8939 	if (IS_ERR(tb)) {
8940 		ret = PTR_ERR(tb);
8941 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8942 		return;
8943 	}
8944 
8945 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8946 	if (!ev) {
8947 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8948 		kfree(tb);
8949 		return;
8950 	}
8951 
8952 	kfree(tb);
8953 }
8954 
8955 #ifdef CONFIG_ATH12K_DEBUGFS
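/* Copy a raw WMI_TAG_ARRAY_INT16/WMI_TAG_ARRAY_BYTE payload into the
 * destination array selected by the current event_count, but only if
 * the corresponding fixed-param TLV has already allocated it.
 */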
8956 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8957 					    const void *ptr, u16 tag, u16 len,
8958 					    struct wmi_tpc_stats_arg *tpc_stats)
8959 {
8960 	u32 len1, len2, len3, len4;
8961 	s16 *dst_ptr;
8962 	s8 *dst_ptr_ctl;
8963 
8964 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8965 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8966 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8967 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8968 
8969 	switch (tpc_stats->event_count) {
8970 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8971 		if (len1 > len)
8972 			return -ENOBUFS;
8973 
8974 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8975 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8976 			memcpy(dst_ptr, ptr, len1);
8977 		}
8978 		break;
8979 	case ATH12K_TPC_STATS_RATES_EVENT1:
8980 		if (len2 > len)
8981 			return -ENOBUFS;
8982 
8983 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8984 			dst_ptr = tpc_stats->rates_array1.rate_array;
8985 			memcpy(dst_ptr, ptr, len2);
8986 		}
8987 		break;
8988 	case ATH12K_TPC_STATS_RATES_EVENT2:
8989 		if (len3 > len)
8990 			return -ENOBUFS;
8991 
8992 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
8993 			dst_ptr = tpc_stats->rates_array2.rate_array;
8994 			memcpy(dst_ptr, ptr, len3);
8995 		}
8996 		break;
8997 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
8998 		if (len4 > len)
8999 			return -ENOBUFS;
9000 
9001 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
9002 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
9003 			memcpy(dst_ptr_ctl, ptr, len4);
9004 		}
9005 		break;
9006 	}
9007 	return 0;
9008 }
9009 
9010 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
9011 				  struct wmi_tpc_stats_arg *tpc_stats,
9012 				  struct wmi_max_reg_power_fixed_params *ev)
9013 {
9014 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
9015 	u32 total_size;
9016 
9017 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9018 		   "Received reg power array type %d length %d for tpc stats\n",
9019 		   le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
9020 
9021 	switch (le32_to_cpu(ev->reg_power_type)) {
9022 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
9023 		reg_pwr = &tpc_stats->max_reg_allowed_power;
9024 		break;
9025 	default:
9026 		return -EINVAL;
9027 	}
9028 
9029 	/* Each entry is 2 bytes, hence multiply the product of the dimensions by 2 */
9030 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9031 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
9032 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
9033 		ath12k_warn(ab,
9034 			    "Total size and reg_array_len don't match for tpc stats\n");
9035 		return -EINVAL;
9036 	}
9037 
9038 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
9039 
9040 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
9041 					 GFP_ATOMIC);
9042 	if (!reg_pwr->reg_pwr_array)
9043 		return -ENOMEM;
9044 
9045 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
9046 
9047 	return 0;
9048 }
9049 
9050 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
9051 				     struct wmi_tpc_stats_arg *tpc_stats,
9052 				     struct wmi_tpc_rates_array_fixed_params *ev)
9053 {
9054 	struct wmi_tpc_rates_array_arg *rates_array;
9055 	u32 flag = 0, rate_array_len;
9056 
9057 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9058 		   "Received rates array type %d length %d for tpc stats\n",
9059 		   le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
9060 
9061 	switch (le32_to_cpu(ev->rate_array_type)) {
9062 	case ATH12K_TPC_STATS_RATES_ARRAY1:
9063 		rates_array = &tpc_stats->rates_array1;
9064 		flag = WMI_TPC_RATES_ARRAY1;
9065 		break;
9066 	case ATH12K_TPC_STATS_RATES_ARRAY2:
9067 		rates_array = &tpc_stats->rates_array2;
9068 		flag = WMI_TPC_RATES_ARRAY2;
9069 		break;
9070 	default:
9071 		ath12k_warn(ab,
9072 			    "Received invalid type of rates array for tpc stats\n");
9073 		return -EINVAL;
9074 	}
9075 	memcpy(&rates_array->tpc_rates_array, ev,
9076 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
9077 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
9078 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
9079 	if (!rates_array->rate_array)
9080 		return -ENOMEM;
9081 
9082 	tpc_stats->tlvs_rcvd |= flag;
9083 	return 0;
9084 }
9085 
9086 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
9087 				      struct wmi_tpc_stats_arg *tpc_stats,
9088 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
9089 {
9090 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
9091 	u32 total_size, ctl_array_len, flag = 0;
9092 
9093 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9094 		   "Received ctl array type %d length %d for tpc stats\n",
9095 		   le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
9096 
9097 	switch (le32_to_cpu(ev->ctl_array_type)) {
9098 	case ATH12K_TPC_STATS_CTL_ARRAY:
9099 		ctl_array = &tpc_stats->ctl_array;
9100 		flag = WMI_TPC_CTL_PWR_ARRAY;
9101 		break;
9102 	default:
9103 		ath12k_warn(ab,
9104 			    "Received invalid type of ctl pwr table for tpc stats\n");
9105 		return -EINVAL;
9106 	}
9107 
9108 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
9109 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
9110 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
9111 		ath12k_warn(ab,
9112 			    "Total size and ctl_array_len don't match for tpc stats\n");
9113 		return -EINVAL;
9114 	}
9115 
9116 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
9117 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
9118 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
9119 	if (!ctl_array->ctl_pwr_table)
9120 		return -ENOMEM;
9121 
9122 	tpc_stats->tlvs_rcvd |= flag;
9123 	return 0;
9124 }
9125 
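/* Each sub-TLV carries the fixed params describing one TPC array
 * (dimensions and length) and triggers allocation of the destination
 * buffer; the raw array contents arrive separately as INT16/BYTE TLVs.
 */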
9126 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
9127 					      u16 tag, u16 len,
9128 					      const void *ptr, void *data)
9129 {
9130 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
9131 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
9132 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
9133 	struct wmi_tpc_stats_arg *tpc_stats = data;
9134 	struct wmi_tpc_config_params *tpc_config;
9135 	int ret = 0;
9136 
9137 	if (!tpc_stats) {
9138 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9139 		return -EINVAL;
9140 	}
9141 
9142 	switch (tag) {
9143 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
9144 		tpc_config = (struct wmi_tpc_config_params *)ptr;
9145 		memcpy(&tpc_stats->tpc_config, tpc_config,
9146 		       sizeof(struct wmi_tpc_config_params));
9147 		break;
9148 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
9149 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
9150 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
9151 		break;
9152 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
9153 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
9154 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
9155 		break;
9156 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
9157 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
9158 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
9159 		break;
9160 	default:
9161 		ath12k_warn(ab,
9162 			    "Received invalid tag for tpc stats in subtlvs\n");
9163 		return -EINVAL;
9164 	}
9165 	return ret;
9166 }
9167 
9168 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
9169 					     u16 tag, u16 len,
9170 					     const void *ptr, void *data)
9171 {
9172 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
9173 	int ret;
9174 
9175 	switch (tag) {
9176 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
9177 		ret = 0;
9178 		/* Fixed param is already processed */
9179 		break;
9180 	case WMI_TAG_ARRAY_STRUCT:
9181 		/* len 0 is expected for array of struct when there
9182 		 * is no content of that type to pack inside that tlv
9183 		 */
9184 		if (len == 0)
9185 			return 0;
9186 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9187 					  ath12k_wmi_tpc_stats_subtlv_parser,
9188 					  tpc_stats);
9189 		break;
9190 	case WMI_TAG_ARRAY_INT16:
9191 		if (len == 0)
9192 			return 0;
9193 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9194 						       WMI_TAG_ARRAY_INT16,
9195 						       len, tpc_stats);
9196 		break;
9197 	case WMI_TAG_ARRAY_BYTE:
9198 		if (len == 0)
9199 			return 0;
9200 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
9201 						       WMI_TAG_ARRAY_BYTE,
9202 						       len, tpc_stats);
9203 		break;
9204 	default:
9205 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
9206 		ret = -EINVAL;
9207 		break;
9208 	}
9209 	return ret;
9210 }
9211 
9212 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
9213 {
9214 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
9215 
9216 	lockdep_assert_held(&ar->data_lock);
9217 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
9218 	if (tpc_stats) {
9219 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
9220 		kfree(tpc_stats->rates_array1.rate_array);
9221 		kfree(tpc_stats->rates_array2.rate_array);
9222 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
9223 		kfree(tpc_stats);
9224 		ar->debug.tpc_stats = NULL;
9225 	}
9226 }
9227 
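/* TPC stats can span multiple HALPHY ctrl path events: event_count 0
 * allocates the stats memory, subsequent events must arrive in
 * sequence, and end_of_event signals the debugfs waiter via
 * tpc_complete.
 */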
9228 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9229 					 struct sk_buff *skb)
9230 {
9231 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
9232 	struct wmi_tpc_stats_arg *tpc_stats;
9233 	const struct wmi_tlv *tlv;
9234 	void *ptr = skb->data;
9235 	struct ath12k *ar;
9236 	u16 tlv_tag;
9237 	u32 event_count;
9238 	int ret;
9239 
9240 	if (!skb->data) {
9241 		ath12k_warn(ab, "No data present in tpc stats event\n");
9242 		return;
9243 	}
9244 
9245 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9246 		ath12k_warn(ab, "TPC stats event size invalid\n");
9247 		return;
9248 	}
9249 
9250 	tlv = (struct wmi_tlv *)ptr;
9251 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9252 	ptr += sizeof(*tlv);
9253 
9254 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
9255 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
9256 		return;
9257 	}
9258 
9259 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
9260 	rcu_read_lock();
9261 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
9262 	if (!ar) {
9263 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
9264 		rcu_read_unlock();
9265 		return;
9266 	}
9267 	spin_lock_bh(&ar->data_lock);
9268 	if (!ar->debug.tpc_request) {
9269 		/* Event received either without a request or after the
9270 		 * timeout; free the memory if it is already allocated
9271 		 */
9272 		if (ar->debug.tpc_stats) {
9273 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
9274 			ath12k_wmi_free_tpc_stats_mem(ar);
9275 		}
9276 		goto unlock;
9277 	}
9278 
9279 	event_count = le32_to_cpu(fixed_param->event_count);
9280 	if (event_count == 0) {
9281 		if (ar->debug.tpc_stats) {
9282 			ath12k_warn(ab,
9283 				    "stale tpc stats memory present\n");
9284 			goto unlock;
9285 		}
9286 		ar->debug.tpc_stats =
9287 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
9288 				GFP_ATOMIC);
9289 		if (!ar->debug.tpc_stats) {
9290 			ath12k_warn(ab,
9291 				    "Failed to allocate memory for tpc stats\n");
9292 			goto unlock;
9293 		}
9294 	}
9295 
9296 	tpc_stats = ar->debug.tpc_stats;
9297 	if (!tpc_stats) {
9298 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9299 		goto unlock;
9300 	}
9301 
9302 	if (event_count != 0) {
9303 		if (event_count != tpc_stats->event_count + 1) {
9304 			ath12k_warn(ab,
9305 				    "Invalid tpc event received\n");
9306 			goto unlock;
9307 		}
9308 	}
9309 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
9310 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
9311 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
9312 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9313 		   "tpc stats event_count %d\n",
9314 		   tpc_stats->event_count);
9315 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9316 				  ath12k_wmi_tpc_stats_event_parser,
9317 				  tpc_stats);
9318 	if (ret) {
9319 		ath12k_wmi_free_tpc_stats_mem(ar);
9320 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
9321 		goto unlock;
9322 	}
9323 
9324 	if (tpc_stats->end_of_event)
9325 		complete(&ar->debug.tpc_complete);
9326 
9327 unlock:
9328 	spin_unlock_bh(&ar->data_lock);
9329 	rcu_read_unlock();
9330 }
9331 #else
9332 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9333 					 struct sk_buff *skb)
9334 {
9335 }
9336 #endif
9337 
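/* The per-pdev noise floor is derived from this event as the minimum
 * hardware noise floor across all active rx chains and occupied 20 MHz
 * segments, later combined with the temperature offset sub-TLV.
 */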
9338 static int
9339 ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
9340 						u16 tag, u16 len,
9341 						const void *ptr, void *data)
9342 {
9343 	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
9344 	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
9345 	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
9346 	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
9347 	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
9348 	u8 num_20mhz_segments;
9349 	s8 min_nf, *nf_ptr;
9350 	int i, j;
9351 
9352 	switch (tag) {
9353 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
9354 		if (len < sizeof(*param_info)) {
9355 			ath12k_warn(ab,
9356 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9357 				    tag, len);
9358 			return -EINVAL;
9359 		}
9360 
9361 		param_info = ptr;
9362 
9363 		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
9364 		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);
9365 
9366 		/* The received array packs per-chain, per-20MHz-subband noise
9367 		 * floor bytes into 32-bit words; unpack them into a 2D byte-array
9368 		 */
9369 		nf_ptr = &param_arg.nf_hw_dbm[0][0];
9370 
9371 		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
9372 			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);
9373 
9374 			for (j = 0; j < 4; j++) {
9375 				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
9376 				nf_ptr++;
9377 			}
9378 		}
9379 
9380 		switch (param_arg.curr_bw) {
9381 		case WMI_CHAN_WIDTH_20:
9382 			num_20mhz_segments = 1;
9383 			break;
9384 		case WMI_CHAN_WIDTH_40:
9385 			num_20mhz_segments = 2;
9386 			break;
9387 		case WMI_CHAN_WIDTH_80:
9388 			num_20mhz_segments = 4;
9389 			break;
9390 		case WMI_CHAN_WIDTH_160:
9391 			num_20mhz_segments = 8;
9392 			break;
9393 		case WMI_CHAN_WIDTH_320:
9394 			num_20mhz_segments = 16;
9395 			break;
9396 		default:
9397 			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
9398 				    param_arg.curr_bw);
9399 			/* In the error case, still consider the primary 20 MHz
9400 			 * segment since that is much better than dropping the
9401 			 * whole event
9402 			 */
9403 			num_20mhz_segments = 1;
9404 		}
9405 
9406 		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;
9407 
9408 		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
9409 			if (!(param_arg.curr_rx_chainmask & BIT(i)))
9410 				continue;
9411 
9412 			for (j = 0; j < num_20mhz_segments; j++) {
9413 				if (param_arg.nf_hw_dbm[i][j] < min_nf)
9414 					min_nf = param_arg.nf_hw_dbm[i][j];
9415 			}
9416 		}
9417 
9418 		rssi_info->min_nf_dbm = min_nf;
9419 		rssi_info->nf_dbm_present = true;
9420 		break;
9421 	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
9422 		if (len < sizeof(*temp_info)) {
9423 			ath12k_warn(ab,
9424 				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
9425 				    tag, len);
9426 			return -EINVAL;
9427 		}
9428 
9429 		temp_info = ptr;
9430 		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
9431 		rssi_info->temp_offset_present = true;
9432 		break;
9433 	default:
9434 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9435 			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
9436 	}
9437 
9438 	return 0;
9439 }
9440 
9441 static int
9442 ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
9443 					   u16 tag, u16 len,
9444 					   const void *ptr, void *data)
9445 {
9446 	int ret = 0;
9447 
9448 	switch (tag) {
9449 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
9450 		/* Fixed param is already processed */
9451 		break;
9452 	case WMI_TAG_ARRAY_STRUCT:
9453 		/* len 0 is expected for array of struct when there
9454 		 * is no content of that type inside that tlv
9455 		 */
9456 		if (len == 0)
9457 			return 0;
9458 
9459 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9460 					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
9461 					  data);
9462 		break;
9463 	default:
9464 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9465 			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
9466 			   tag);
9467 		break;
9468 	}
9469 
9470 	return ret;
9471 }
9472 
9473 static int
9474 ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
9475 						  size_t len, int *pdev_id)
9476 {
9477 	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
9478 	const struct wmi_tlv *tlv;
9479 	u16 tlv_tag;
9480 
9481 	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9482 		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
9483 		return -EINVAL;
9484 	}
9485 
9486 	tlv = (struct wmi_tlv *)ptr;
9487 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9488 	ptr += sizeof(*tlv);
9489 
9490 	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
9491 		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
9492 		return -EINVAL;
9493 	}
9494 
9495 	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
9496 	*pdev_id = le32_to_cpu(fixed_param->pdev_id);
9497 
9498 	return 0;
9499 }
9500 
9501 static void
9502 ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
9503 			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
9504 {
9505 	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;
9506 
9507 	lockdep_assert_held(&ar->data_lock);
9508 
9509 	if (rssi_info->temp_offset_present)
9510 		info->temp_offset = rssi_info->temp_offset;
9511 
9512 	if (rssi_info->nf_dbm_present)
9513 		info->min_nf_dbm = rssi_info->min_nf_dbm;
9514 
9515 	info->noise_floor = info->min_nf_dbm + info->temp_offset;
9516 }
9517 
9518 static void
9519 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
9520 						 struct sk_buff *skb)
9521 {
9522 	struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info;
9523 	struct ath12k *ar;
9524 	s32 noise_floor;
9525 	u32 pdev_id;
9526 	int ret;
9527 
9528 	ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
9529 								&pdev_id);
9530 	if (ret) {
9531 		ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
9532 			    ret);
9533 		return;
9534 	}
9535 
9536 	rcu_read_lock();
9537 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
9538 	/* If pdev is not active, ignore the event */
9539 	if (!ar)
9540 		goto out_unlock;
9541 
9542 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9543 				  ath12k_wmi_rssi_dbm_conv_info_event_parser,
9544 				  &rssi_info);
9545 	if (ret) {
9546 		ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
9547 		goto out_unlock;
9548 	}
9549 
9550 	spin_lock_bh(&ar->data_lock);
9551 	ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
9552 	noise_floor = ath12k_pdev_get_noise_floor(ar);
9553 	spin_unlock_bh(&ar->data_lock);
9554 
9555 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9556 		   "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
9557 out_unlock:
9558 	rcu_read_unlock();
9559 }
9560 
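/* Main WMI event dispatcher. Every handler borrows the skb and it is
 * freed at the 'out' label, except WMI_MGMT_RX_EVENTID whose handler
 * takes ownership of the skb.
 */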
9561 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
9562 {
9563 	struct wmi_cmd_hdr *cmd_hdr;
9564 	enum wmi_tlv_event_id id;
9565 
9566 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
9567 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
9568 
9569 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
9570 		goto out;
9571 
9572 	switch (id) {
9573 		/* Process all the WMI events here */
9574 	case WMI_SERVICE_READY_EVENTID:
9575 		ath12k_service_ready_event(ab, skb);
9576 		break;
9577 	case WMI_SERVICE_READY_EXT_EVENTID:
9578 		ath12k_service_ready_ext_event(ab, skb);
9579 		break;
9580 	case WMI_SERVICE_READY_EXT2_EVENTID:
9581 		ath12k_service_ready_ext2_event(ab, skb);
9582 		break;
9583 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
9584 		ath12k_reg_chan_list_event(ab, skb);
9585 		break;
9586 	case WMI_READY_EVENTID:
9587 		ath12k_ready_event(ab, skb);
9588 		break;
9589 	case WMI_PEER_DELETE_RESP_EVENTID:
9590 		ath12k_peer_delete_resp_event(ab, skb);
9591 		break;
9592 	case WMI_VDEV_START_RESP_EVENTID:
9593 		ath12k_vdev_start_resp_event(ab, skb);
9594 		break;
9595 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
9596 		ath12k_bcn_tx_status_event(ab, skb);
9597 		break;
9598 	case WMI_VDEV_STOPPED_EVENTID:
9599 		ath12k_vdev_stopped_event(ab, skb);
9600 		break;
9601 	case WMI_MGMT_RX_EVENTID:
9602 		ath12k_mgmt_rx_event(ab, skb);
9603 		/* mgmt_rx_event() owns the skb now! */
9604 		return;
9605 	case WMI_MGMT_TX_COMPLETION_EVENTID:
9606 		ath12k_mgmt_tx_compl_event(ab, skb);
9607 		break;
9608 	case WMI_SCAN_EVENTID:
9609 		ath12k_scan_event(ab, skb);
9610 		break;
9611 	case WMI_PEER_STA_KICKOUT_EVENTID:
9612 		ath12k_peer_sta_kickout_event(ab, skb);
9613 		break;
9614 	case WMI_ROAM_EVENTID:
9615 		ath12k_roam_event(ab, skb);
9616 		break;
9617 	case WMI_CHAN_INFO_EVENTID:
9618 		ath12k_chan_info_event(ab, skb);
9619 		break;
9620 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
9621 		ath12k_pdev_bss_chan_info_event(ab, skb);
9622 		break;
9623 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
9624 		ath12k_vdev_install_key_compl_event(ab, skb);
9625 		break;
9626 	case WMI_SERVICE_AVAILABLE_EVENTID:
9627 		ath12k_service_available_event(ab, skb);
9628 		break;
9629 	case WMI_PEER_ASSOC_CONF_EVENTID:
9630 		ath12k_peer_assoc_conf_event(ab, skb);
9631 		break;
9632 	case WMI_UPDATE_STATS_EVENTID:
9633 		ath12k_update_stats_event(ab, skb);
9634 		break;
9635 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
9636 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
9637 		break;
9638 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
9639 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
9640 		break;
9641 	case WMI_PDEV_TEMPERATURE_EVENTID:
9642 		ath12k_wmi_pdev_temperature_event(ab, skb);
9643 		break;
9644 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
9645 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
9646 		break;
9647 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
9648 		ath12k_fils_discovery_event(ab, skb);
9649 		break;
9650 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
9651 		ath12k_probe_resp_tx_status_event(ab, skb);
9652 		break;
9653 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
9654 		ath12k_rfkill_state_change_event(ab, skb);
9655 		break;
9656 	case WMI_TWT_ENABLE_EVENTID:
9657 		ath12k_wmi_twt_enable_event(ab, skb);
9658 		break;
9659 	case WMI_TWT_DISABLE_EVENTID:
9660 		ath12k_wmi_twt_disable_event(ab, skb);
9661 		break;
9662 	case WMI_P2P_NOA_EVENTID:
9663 		ath12k_wmi_p2p_noa_event(ab, skb);
9664 		break;
9665 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
9666 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
9667 		break;
9668 	case WMI_VDEV_DELETE_RESP_EVENTID:
9669 		ath12k_vdev_delete_resp_event(ab, skb);
9670 		break;
9671 	case WMI_DIAG_EVENTID:
9672 		ath12k_wmi_diag_event(ab, skb);
9673 		break;
9674 	case WMI_WOW_WAKEUP_HOST_EVENTID:
9675 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
9676 		break;
9677 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
9678 		ath12k_wmi_gtk_offload_status_event(ab, skb);
9679 		break;
9680 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
9681 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
9682 		break;
9683 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
9684 		ath12k_wmi_event_teardown_complete(ab, skb);
9685 		break;
9686 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
9687 		ath12k_wmi_process_tpc_stats(ab, skb);
9688 		break;
9689 	case WMI_11D_NEW_COUNTRY_EVENTID:
9690 		ath12k_reg_11d_new_cc_event(ab, skb);
9691 		break;
9692 	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
9693 		ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
9694 		break;
9695 	/* add Unsupported events (rare) here */
9696 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
9697 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
9698 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
9699 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9700 			   "ignoring unsupported event 0x%x\n", id);
9701 		break;
9702 	/* add Unsupported events (frequent) here */
9703 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
9704 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
9705 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
9706 		/* debug output might flood, hence silently ignore (no-op) */
9707 		break;
9708 	case WMI_PDEV_UTF_EVENTID:
9709 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
9710 			ath12k_tm_wmi_event_segmented(ab, id, skb);
9711 		else
9712 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
9713 		break;
9714 	default:
9715 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
9716 		break;
9717 	}
9718 
9719 out:
9720 	dev_kfree_skb(skb);
9721 }
9722 
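/* Each pdev gets its own WMI control HTC service; svc_id[] maps pdev
 * index 0/1/2 to WMI_CONTROL, WMI_CONTROL_MAC1 and WMI_CONTROL_MAC2
 * respectively.
 */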
9723 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
9724 					   u32 pdev_idx)
9725 {
9726 	int status;
9727 	static const u32 svc_id[] = {
9728 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
9729 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
9730 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
9731 	};
9732 	struct ath12k_htc_svc_conn_req conn_req = {};
9733 	struct ath12k_htc_svc_conn_resp conn_resp = {};
9734 
9735 	/* these fields are the same for all service endpoints */
9736 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
9737 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
9738 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
9739 
9740 	/* connect to control service */
9741 	conn_req.service_id = svc_id[pdev_idx];
9742 
9743 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
9744 	if (status) {
9745 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
9746 			    status);
9747 		return status;
9748 	}
9749 
9750 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
9751 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
9752 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
9753 
9754 	return 0;
9755 }
9756 
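/* Build a WMI_UNIT_TEST_CMDID command: a fixed-param TLV copied from
 * ut_cmd followed by a WMI_TAG_ARRAY_UINT32 TLV holding num_args test
 * arguments.
 */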
9757 static int
9758 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
9759 			      struct wmi_unit_test_cmd ut_cmd,
9760 			      u32 *test_args)
9761 {
9762 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9763 	struct wmi_unit_test_cmd *cmd;
9764 	struct sk_buff *skb;
9765 	struct wmi_tlv *tlv;
9766 	void *ptr;
9767 	u32 *ut_cmd_args;
9768 	int buf_len, arg_len;
9769 	int ret;
9770 	int i;
9771 
9772 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
9773 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
9774 
9775 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9776 	if (!skb)
9777 		return -ENOMEM;
9778 
9779 	cmd = (struct wmi_unit_test_cmd *)skb->data;
9780 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
9781 						 sizeof(ut_cmd));
9782 
9783 	cmd->vdev_id = ut_cmd.vdev_id;
9784 	cmd->module_id = ut_cmd.module_id;
9785 	cmd->num_args = ut_cmd.num_args;
9786 	cmd->diag_token = ut_cmd.diag_token;
9787 
9788 	ptr = skb->data + sizeof(ut_cmd);
9789 
9790 	tlv = ptr;
9791 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9792 
9793 	ptr += TLV_HDR_SIZE;
9794 
9795 	ut_cmd_args = ptr;
9796 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
9797 		ut_cmd_args[i] = test_args[i];
9798 
9799 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9800 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
9801 		   cmd->module_id, cmd->vdev_id, cmd->num_args,
9802 		   cmd->diag_token);
9803 
9804 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
9805 
9806 	if (ret) {
9807 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
9808 			    ret);
9809 		dev_kfree_skb(skb);
9810 	}
9811 
9812 	return ret;
9813 }
9814 
9815 int ath12k_wmi_simulate_radar(struct ath12k *ar)
9816 {
9817 	struct ath12k_link_vif *arvif;
9818 	u32 dfs_args[DFS_MAX_TEST_ARGS];
9819 	struct wmi_unit_test_cmd wmi_ut;
9820 	bool arvif_found = false;
9821 
9822 	list_for_each_entry(arvif, &ar->arvifs, list) {
9823 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
9824 			arvif_found = true;
9825 			break;
9826 		}
9827 	}
9828 
9829 	if (!arvif_found)
9830 		return -EINVAL;
9831 
9832 	dfs_args[DFS_TEST_CMDID] = 0;
9833 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
9834 	/* Currently we can pass segment_id (b0 - b1), chirp (b2) and
9835 	 * freq offset (b3 - b10) to the unit test. For simulation
9836 	 * purposes this can be set to 0, which is valid.
9837 	 */
9838 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
9839 
9840 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
9841 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
9842 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
9843 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
9844 
9845 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
9846 
9847 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
9848 }
9849 
9850 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
9851 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
9852 {
9853 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
9854 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9855 	struct sk_buff *skb;
9856 	struct wmi_tlv *tlv;
9857 	__le32 *pdev_id;
9858 	u32 buf_len;
9859 	void *ptr;
9860 	int ret;
9861 
9862 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
9863 
9864 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9865 	if (!skb)
9866 		return -ENOMEM;
9867 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
9868 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
9869 						 sizeof(*cmd));
9870 
9871 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
9872 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
9873 	cmd->subid = cpu_to_le32(tpc_stats_type);
9874 
9875 	ptr = skb->data + sizeof(*cmd);
9876 
9877 	/* The below TLV arrays optionally follow this fixed param TLV structure
9878 	 * 1. ARRAY_UINT32 pdev_ids[]
9879 	 *      If this array is present and non-zero length, stats should only
9880 	 *      be provided from the pdevs identified in the array.
9881 	 * 2. ARRAY_UINT32 vdev_ids[]
9882 	 *      If this array is present and non-zero length, stats should only
9883 	 *      be provided from the vdevs identified in the array.
9884 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
9885 	 *      If this array is present and non-zero length, stats should only
9886 	 *      be provided from the peers with the MAC addresses specified
9887 	 *      in the array
9888 	 */
9889 	tlv = ptr;
9890 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9891 	ptr += TLV_HDR_SIZE;
9892 
9893 	pdev_id = ptr;
9894 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
9895 	ptr += sizeof(*pdev_id);
9896 
9897 	tlv = ptr;
9898 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9899 	ptr += TLV_HDR_SIZE;
9900 
9901 	tlv = ptr;
9902 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
9903 	ptr += TLV_HDR_SIZE;
9904 
9905 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
9906 	if (ret) {
9907 		ath12k_warn(ar->ab,
9908 			    "failed to send WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
9909 		dev_kfree_skb(skb);
9910 		return ret;
9911 	}
9912 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
9913 		   ar->pdev->pdev_id);
9914 
9915 	return ret;
9916 }
9917 
9918 int ath12k_wmi_connect(struct ath12k_base *ab)
9919 {
9920 	u32 i;
9921 	u8 wmi_ep_count;
9922 
9923 	wmi_ep_count = ab->htc.wmi_ep_count;
9924 	if (wmi_ep_count > ab->hw_params->max_radios)
9925 		return -EINVAL;
9926 
9927 	for (i = 0; i < wmi_ep_count; i++)
9928 		ath12k_connect_pdev_htc_service(ab, i);
9929 
9930 	return 0;
9931 }
9932 
9933 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
9934 {
9935 	if (WARN_ON(pdev_id >= MAX_RADIOS))
9936 		return;
9937 
9938 	/* TODO: Deinit any pdev specific wmi resource */
9939 }
9940 
9941 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
9942 			   u8 pdev_id)
9943 {
9944 	struct ath12k_wmi_pdev *wmi_handle;
9945 
9946 	if (pdev_id >= ab->hw_params->max_radios)
9947 		return -EINVAL;
9948 
9949 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
9950 
9951 	wmi_handle->wmi_ab = &ab->wmi_ab;
9952 
9953 	ab->wmi_ab.ab = ab;
9954 	/* TODO: Init remaining resource specific to pdev */
9955 
9956 	return 0;
9957 }
9958 
9959 int ath12k_wmi_attach(struct ath12k_base *ab)
9960 {
9961 	int ret;
9962 
9963 	ret = ath12k_wmi_pdev_attach(ab, 0);
9964 	if (ret)
9965 		return ret;
9966 
9967 	ab->wmi_ab.ab = ab;
9968 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9969 
9970 	/* It's overwritten when service_ext_ready is handled */
9971 	if (ab->hw_params->single_pdev_only)
9972 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9973 
9974 	/* TODO: Init remaining wmi soc resources required */
9975 	init_completion(&ab->wmi_ab.service_ready);
9976 	init_completion(&ab->wmi_ab.unified_ready);
9977 
9978 	return 0;
9979 }
9980 
9981 void ath12k_wmi_detach(struct ath12k_base *ab)
9982 {
9983 	int i;
9984 
9985 	/* TODO: Deinit wmi resource specific to SOC as required */
9986 
9987 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
9988 		ath12k_wmi_pdev_detach(ab, i);
9989 
9990 	ath12k_wmi_free_dbring_caps(ab);
9991 }
9992 
9993 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
9994 {
9995 	struct wmi_hw_data_filter_cmd *cmd;
9996 	struct sk_buff *skb;
9997 	int len;
9998 
9999 	len = sizeof(*cmd);
10000 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10001 
10002 	if (!skb)
10003 		return -ENOMEM;
10004 
10005 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
10006 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
10007 						 sizeof(*cmd));
10008 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10009 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
10010 
	/* When disabling, set all modes so every hardware filter is cleared */
	if (arg->enable)
		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
	else
		cmd->hw_filter_bitmap = cpu_to_le32(U32_MAX);
10016 
10017 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10018 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
10019 		   arg->enable, arg->hw_filter_bitmap);
10020 
10021 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
10022 }
10023 
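/* Usage sketch (hypothetical caller; 'ar', 'arvif' and the filter flags
 * from enum wmi_hw_data_filter_type are assumed to be in scope, as in a
 * WoW suspend path):
 *
 *	struct wmi_hw_data_filter_arg filter_arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.enable = true,
 *		.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
 *				    WMI_HW_DATA_FILTER_DROP_NON_ARP_BC,
 *	};
 *
 *	ret = ath12k_wmi_hw_data_filter_cmd(ar, &filter_arg);
 */
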
10024 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
10025 {
10026 	struct wmi_wow_host_wakeup_cmd *cmd;
10027 	struct sk_buff *skb;
10028 	size_t len;
10029 
10030 	len = sizeof(*cmd);
10031 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10032 	if (!skb)
10033 		return -ENOMEM;
10034 
10035 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
10036 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
10037 						 sizeof(*cmd));
10038 
10039 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
10040 
10041 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
10042 }
10043 
10044 int ath12k_wmi_wow_enable(struct ath12k *ar)
10045 {
10046 	struct wmi_wow_enable_cmd *cmd;
10047 	struct sk_buff *skb;
10048 	int len;
10049 
10050 	len = sizeof(*cmd);
10051 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10052 	if (!skb)
10053 		return -ENOMEM;
10054 
10055 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
10056 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
10057 						 sizeof(*cmd));
10058 
10059 	cmd->enable = cpu_to_le32(1);
10060 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
10061 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
10062 
10063 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
10064 }
10065 
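/* Sketch of the expected pairing (hypothetical flow; the actual
 * sequencing lives in the WoW suspend/resume handlers):
 *
 *	on suspend:
 *		ret = ath12k_wmi_wow_enable(ar);
 *	on resume:
 *		ret = ath12k_wmi_wow_host_wakeup_ind(ar);
 */
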
10066 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
10067 				    enum wmi_wow_wakeup_event event,
10068 				    u32 enable)
10069 {
10070 	struct wmi_wow_add_del_event_cmd *cmd;
10071 	struct sk_buff *skb;
10072 	size_t len;
10073 
10074 	len = sizeof(*cmd);
10075 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10076 	if (!skb)
10077 		return -ENOMEM;
10078 
10079 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
10080 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
10081 						 sizeof(*cmd));
10082 	cmd->vdev_id = cpu_to_le32(vdev_id);
10083 	cmd->is_add = cpu_to_le32(enable);
	cmd->event_bitmap = cpu_to_le32(1U << event);
10085 
10086 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
10087 		   wow_wakeup_event(event), enable, vdev_id);
10088 
10089 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
10090 }
10091 
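/* Usage sketch (hypothetical; WOW_MAGIC_PKT_RECVD_EVENT is assumed to be
 * a member of enum wmi_wow_wakeup_event): arm magic-packet wakeup on a
 * vdev before entering WoW:
 *
 *	ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id,
 *					      WOW_MAGIC_PKT_RECVD_EVENT, 1);
 */
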
10092 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
10093 			       const u8 *pattern, const u8 *mask,
10094 			       int pattern_len, int pattern_offset)
10095 {
10096 	struct wmi_wow_add_pattern_cmd *cmd;
10097 	struct wmi_wow_bitmap_pattern_params *bitmap;
10098 	struct wmi_tlv *tlv;
10099 	struct sk_buff *skb;
10100 	void *ptr;
10101 	size_t len;
10102 
10103 	len = sizeof(*cmd) +
10104 	      sizeof(*tlv) +			/* array struct */
10105 	      sizeof(*bitmap) +			/* bitmap */
10106 	      sizeof(*tlv) +			/* empty ipv4 sync */
10107 	      sizeof(*tlv) +			/* empty ipv6 sync */
10108 	      sizeof(*tlv) +			/* empty magic */
10109 	      sizeof(*tlv) +			/* empty info timeout */
10110 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
10111 
10112 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10113 	if (!skb)
10114 		return -ENOMEM;
10115 
10116 	/* cmd */
10117 	ptr = skb->data;
10118 	cmd = ptr;
10119 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
10120 						 sizeof(*cmd));
10121 	cmd->vdev_id = cpu_to_le32(vdev_id);
10122 	cmd->pattern_id = cpu_to_le32(pattern_id);
10123 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10124 
10125 	ptr += sizeof(*cmd);
10126 
10127 	/* bitmap */
10128 	tlv = ptr;
10129 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
10130 
10131 	ptr += sizeof(*tlv);
10132 
10133 	bitmap = ptr;
10134 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
10135 						    sizeof(*bitmap));
10136 	memcpy(bitmap->patternbuf, pattern, pattern_len);
10137 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
10138 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
10139 	bitmap->pattern_len = cpu_to_le32(pattern_len);
10140 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
10141 	bitmap->pattern_id = cpu_to_le32(pattern_id);
10142 
10143 	ptr += sizeof(*bitmap);
10144 
10145 	/* ipv4 sync */
10146 	tlv = ptr;
10147 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10148 
10149 	ptr += sizeof(*tlv);
10150 
10151 	/* ipv6 sync */
10152 	tlv = ptr;
10153 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10154 
10155 	ptr += sizeof(*tlv);
10156 
10157 	/* magic */
10158 	tlv = ptr;
10159 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
10160 
10161 	ptr += sizeof(*tlv);
10162 
10163 	/* pattern info timeout */
10164 	tlv = ptr;
10165 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10166 
10167 	ptr += sizeof(*tlv);
10168 
10169 	/* ratelimit interval */
10170 	tlv = ptr;
10171 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
10172 
10173 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
10174 		   vdev_id, pattern_id, pattern_offset, pattern_len);
10175 
10176 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
10177 			bitmap->patternbuf, pattern_len);
10178 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
10179 			bitmap->bitmaskbuf, pattern_len);
10180 
10181 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
10182 }
10183 
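/* Usage sketch (hypothetical values): wake on frames that match 'pattern'
 * under 'mask' at offset 0. Note that pattern and mask must be the same
 * length, since both memcpy()s above and bitmask_len use pattern_len:
 *
 *	static const u8 pattern[] = { 0x01, 0x00, 0x5e };
 *	static const u8 mask[] = { 0xff, 0xff, 0xff };
 *
 *	ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id, 1,
 *					 pattern, mask, sizeof(pattern), 0);
 */
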
10184 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
10185 {
10186 	struct wmi_wow_del_pattern_cmd *cmd;
10187 	struct sk_buff *skb;
10188 	size_t len;
10189 
10190 	len = sizeof(*cmd);
10191 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10192 	if (!skb)
10193 		return -ENOMEM;
10194 
10195 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
10196 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
10197 						 sizeof(*cmd));
10198 	cmd->vdev_id = cpu_to_le32(vdev_id);
10199 	cmd->pattern_id = cpu_to_le32(pattern_id);
10200 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
10201 
10202 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
10203 		   vdev_id, pattern_id);
10204 
10205 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
10206 }
10207 
10208 static struct sk_buff *
10209 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
10210 				   struct wmi_pno_scan_req_arg *pno)
10211 {
10212 	struct nlo_configured_params *nlo_list;
10213 	size_t len, nlo_list_len, channel_list_len;
10214 	struct wmi_wow_nlo_config_cmd *cmd;
10215 	__le32 *channel_list;
10216 	struct wmi_tlv *tlv;
10217 	struct sk_buff *skb;
10218 	void *ptr;
10219 	u32 i;
10220 
	len = sizeof(*cmd) +
	      /* TLV placeholder for array of structures
	       * nlo_configured_params (nlo_list)
	       */
	      sizeof(*tlv) +
	      /* TLV placeholder for array of uint32 channel_list */
	      sizeof(*tlv);
10228 
10229 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
10230 	len += channel_list_len;
10231 
10232 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
10233 	len += nlo_list_len;
10234 
10235 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10236 	if (!skb)
10237 		return ERR_PTR(-ENOMEM);
10238 
10239 	ptr = skb->data;
10240 	cmd = ptr;
10241 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
10242 
10243 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
10244 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
10245 
10246 	/* current FW does not support min-max range for dwell time */
10247 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
10248 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
10249 
10250 	if (pno->do_passive_scan)
10251 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
10252 
10253 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
10254 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
10255 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
10256 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
10257 
10258 	if (pno->enable_pno_scan_randomization) {
10259 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
10260 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
10261 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
10262 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
10263 	}
10264 
10265 	ptr += sizeof(*cmd);
10266 
10267 	/* nlo_configured_params(nlo_list) */
10268 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
10269 	tlv = ptr;
10270 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
10271 
10272 	ptr += sizeof(*tlv);
10273 	nlo_list = ptr;
10274 	for (i = 0; i < pno->uc_networks_count; i++) {
10275 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
10276 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
10277 						     sizeof(*nlo_list));
10278 
10279 		nlo_list[i].ssid.valid = cpu_to_le32(1);
10280 		nlo_list[i].ssid.ssid.ssid_len =
10281 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
10282 		memcpy(nlo_list[i].ssid.ssid.ssid,
10283 		       pno->a_networks[i].ssid.ssid,
10284 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
10285 
10286 		if (pno->a_networks[i].rssi_threshold &&
10287 		    pno->a_networks[i].rssi_threshold > -300) {
10288 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
10289 			nlo_list[i].rssi_cond.rssi =
10290 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
10291 		}
10292 
10293 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
10294 		nlo_list[i].bcast_nw_type.bcast_nw_type =
10295 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
10296 	}
10297 
10298 	ptr += nlo_list_len;
10299 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
10300 	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
10302 	ptr += sizeof(*tlv);
10303 	channel_list = ptr;
10304 
10305 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
10306 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
10307 
10308 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
10309 		   vdev_id);
10310 
10311 	return skb;
10312 }
10313 
10314 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
10315 							 u32 vdev_id)
10316 {
10317 	struct wmi_wow_nlo_config_cmd *cmd;
10318 	struct sk_buff *skb;
10319 	size_t len;
10320 
10321 	len = sizeof(*cmd);
10322 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10323 	if (!skb)
10324 		return ERR_PTR(-ENOMEM);
10325 
10326 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
10327 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
10328 
10329 	cmd->vdev_id = cpu_to_le32(vdev_id);
10330 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
10331 
10332 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10333 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
10334 	return skb;
10335 }
10336 
10337 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg *pno_scan)
10339 {
10340 	struct sk_buff *skb;
10341 
10342 	if (pno_scan->enable)
10343 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
10344 	else
10345 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
10346 
	if (IS_ERR(skb))
		return PTR_ERR(skb);
10349 
10350 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
10351 }
10352 
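/* Usage sketch (hypothetical, minimal single-network request; a real
 * caller fills this from cfg80211 sched-scan state):
 *
 *	struct wmi_pno_scan_req_arg pno = {};
 *
 *	pno.enable = 1;
 *	pno.vdev_id = arvif->vdev_id;
 *	pno.uc_networks_count = 1;
 *	pno.a_networks[0].ssid.ssid_len = 4;
 *	memcpy(pno.a_networks[0].ssid.ssid, "mesh", 4);
 *	pno.a_networks[0].channel_count = 1;
 *	pno.a_networks[0].channels[0] = 2437;
 *
 *	ret = ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, &pno);
 */
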
10353 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
10354 				       struct wmi_arp_ns_offload_arg *offload,
10355 				       void **ptr,
10356 				       bool enable,
10357 				       bool ext)
10358 {
10359 	struct wmi_ns_offload_params *ns;
10360 	struct wmi_tlv *tlv;
10361 	void *buf_ptr = *ptr;
10362 	u32 ns_cnt, ns_ext_tuples;
10363 	int i, max_offloads;
10364 
10365 	ns_cnt = offload->ipv6_count;
10366 
	tlv = buf_ptr;
10368 
10369 	if (ext) {
10370 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
10371 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10372 						 ns_ext_tuples * sizeof(*ns));
10373 		i = WMI_MAX_NS_OFFLOADS;
10374 		max_offloads = offload->ipv6_count;
10375 	} else {
10376 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10377 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
10378 		i = 0;
10379 		max_offloads = WMI_MAX_NS_OFFLOADS;
10380 	}
10381 
10382 	buf_ptr += sizeof(*tlv);
10383 
10384 	for (; i < max_offloads; i++) {
10385 		ns = buf_ptr;
10386 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
10387 							sizeof(*ns));
10388 
10389 		if (enable) {
10390 			if (i < ns_cnt)
10391 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
10392 
10393 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
10394 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
10395 
10396 			if (offload->ipv6_type[i])
10397 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
10398 
10399 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
10400 
10401 			if (!is_zero_ether_addr(ns->target_mac.addr))
10402 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
10403 
10404 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10405 				   "wmi index %d ns_solicited %pI6 target %pI6",
10406 				   i, ns->solicitation_ipaddr,
10407 				   ns->target_ipaddr[0]);
10408 		}
10409 
10410 		buf_ptr += sizeof(*ns);
10411 	}
10412 
10413 	*ptr = buf_ptr;
10414 }
10415 
10416 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
10417 					struct wmi_arp_ns_offload_arg *offload,
10418 					void **ptr,
10419 					bool enable)
10420 {
10421 	struct wmi_arp_offload_params *arp;
10422 	struct wmi_tlv *tlv;
10423 	void *buf_ptr = *ptr;
10424 	int i;
10425 
10426 	/* fill arp tuple */
10427 	tlv = buf_ptr;
10428 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10429 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
10430 	buf_ptr += sizeof(*tlv);
10431 
10432 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
10433 		arp = buf_ptr;
10434 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
10435 							 sizeof(*arp));
10436 
10437 		if (enable && i < offload->ipv4_count) {
10438 			/* Copy the target ip addr and flags */
10439 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
10440 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
10441 
10442 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
10443 				   arp->target_ipaddr);
10444 		}
10445 
10446 		buf_ptr += sizeof(*arp);
10447 	}
10448 
10449 	*ptr = buf_ptr;
10450 }
10451 
10452 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
10453 			      struct ath12k_link_vif *arvif,
10454 			      struct wmi_arp_ns_offload_arg *offload,
10455 			      bool enable)
10456 {
10457 	struct wmi_set_arp_ns_offload_cmd *cmd;
10458 	struct wmi_tlv *tlv;
10459 	struct sk_buff *skb;
10460 	void *buf_ptr;
10461 	size_t len;
10462 	u8 ns_cnt, ns_ext_tuples = 0;
10463 
10464 	ns_cnt = offload->ipv6_count;
10465 
10466 	len = sizeof(*cmd) +
10467 	      sizeof(*tlv) +
10468 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
10469 	      sizeof(*tlv) +
10470 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
10471 
10472 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
10473 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
10474 		len += sizeof(*tlv) +
10475 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
10476 	}
10477 
10478 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10479 	if (!skb)
10480 		return -ENOMEM;
10481 
10482 	buf_ptr = skb->data;
10483 	cmd = buf_ptr;
10484 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
10485 						 sizeof(*cmd));
10486 	cmd->flags = cpu_to_le32(0);
10487 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10488 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
10489 
10490 	buf_ptr += sizeof(*cmd);
10491 
	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
10493 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
10494 
10495 	if (ns_ext_tuples)
		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
10497 
10498 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
10499 }
10500 
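/* Layout note: the command always carries WMI_MAX_NS_OFFLOADS NS tuples
 * followed by WMI_MAX_ARP_OFFLOADS ARP tuples; IPv6 addresses beyond
 * WMI_MAX_NS_OFFLOADS go into a trailing extension array, which is why
 * ath12k_wmi_fill_ns_offload() is invoked a second time with ext set.
 */
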
10501 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
10502 				 struct ath12k_link_vif *arvif, bool enable)
10503 {
10504 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
10505 	struct wmi_gtk_rekey_offload_cmd *cmd;
10506 	struct sk_buff *skb;
10507 	__le64 replay_ctr;
10508 	int len;
10509 
10510 	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10512 	if (!skb)
10513 		return -ENOMEM;
10514 
10515 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10516 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10517 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10518 
10519 	if (enable) {
10520 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
10521 
		/* the lengths in rekey_data and cmd are equal */
10523 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
10524 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
10525 
10526 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
10527 		memcpy(cmd->replay_ctr, &replay_ctr,
10528 		       sizeof(replay_ctr));
10529 	} else {
10530 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
10531 	}
10532 
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi gtk rekey offload vdev %d enable %d\n",
10534 		   arvif->vdev_id, enable);
10535 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10536 }
10537 
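/* Pairing sketch (hypothetical flow; rekey_data is expected to have been
 * cached in the arvif from mac80211's set_rekey_data() path beforehand):
 *
 *	on suspend:
 *		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, true);
 *	on resume:
 *		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, false);
 *		ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
 */
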
10538 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
10539 				 struct ath12k_link_vif *arvif)
10540 {
10541 	struct wmi_gtk_rekey_offload_cmd *cmd;
10542 	struct sk_buff *skb;
10543 	int len;
10544 
10545 	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10547 	if (!skb)
10548 		return -ENOMEM;
10549 
10550 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
10551 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
10552 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10553 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
10554 
10555 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
10556 		   arvif->vdev_id);
10557 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
10558 }
10559 
10560 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
10561 			     const struct wmi_sta_keepalive_arg *arg)
10562 {
10563 	struct wmi_sta_keepalive_arp_resp_params *arp;
10564 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10565 	struct wmi_sta_keepalive_cmd *cmd;
10566 	struct sk_buff *skb;
10567 	size_t len;
10568 
10569 	len = sizeof(*cmd) + sizeof(*arp);
10570 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10571 	if (!skb)
10572 		return -ENOMEM;
10573 
10574 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
10575 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
10576 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10577 	cmd->enabled = cpu_to_le32(arg->enabled);
10578 	cmd->interval = cpu_to_le32(arg->interval);
10579 	cmd->method = cpu_to_le32(arg->method);
10580 
10581 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
10582 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
10583 						 sizeof(*arp));
10584 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
10585 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
10586 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
10587 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
10588 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
10589 	}
10590 
10591 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10592 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
10593 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
10594 
10595 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
10596 }
10597 
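/* Usage sketch (hypothetical values; '...' marks addresses a real caller
 * must supply): periodic ARP-response keepalive on a vdev:
 *
 *	struct wmi_sta_keepalive_arg karg = {
 *		.vdev_id = arvif->vdev_id,
 *		.enabled = 1,
 *		.method = WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE,
 *		.interval = 30,
 *		.src_ip4_addr = ...,
 *		.dest_ip4_addr = ...,
 *	};
 *
 *	ret = ath12k_wmi_sta_keepalive(ar, &karg);
 */
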
10598 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
10599 {
10600 	struct wmi_mlo_setup_cmd *cmd;
10601 	struct ath12k_wmi_pdev *wmi = ar->wmi;
	__le32 *partner_links;
	u32 num_links;
10603 	int i, ret, buf_len, arg_len;
10604 	struct sk_buff *skb;
10605 	struct wmi_tlv *tlv;
10606 	void *ptr;
10607 
10608 	num_links = mlo_params->num_partner_links;
10609 	arg_len = num_links * sizeof(u32);
10610 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
10611 
10612 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
10613 	if (!skb)
10614 		return -ENOMEM;
10615 
10616 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
10617 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
10618 						 sizeof(*cmd));
	cmd->mld_group_id = cpu_to_le32(mlo_params->group_id);
10620 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10621 	ptr = skb->data + sizeof(*cmd);
10622 
10623 	tlv = ptr;
10624 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
10625 	ptr += TLV_HDR_SIZE;
10626 
10627 	partner_links = ptr;
10628 	for (i = 0; i < num_links; i++)
		partner_links[i] = cpu_to_le32(mlo_params->partner_link_id[i]);
10630 
10631 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
10632 	if (ret) {
10633 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
10634 			    ret);
10635 		dev_kfree_skb(skb);
10636 		return ret;
10637 	}
10638 
10639 	return 0;
10640 }
10641 
10642 int ath12k_wmi_mlo_ready(struct ath12k *ar)
10643 {
10644 	struct wmi_mlo_ready_cmd *cmd;
10645 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10646 	struct sk_buff *skb;
10647 	int ret, len;
10648 
10649 	len = sizeof(*cmd);
10650 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10651 	if (!skb)
10652 		return -ENOMEM;
10653 
10654 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
10655 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
10656 						 sizeof(*cmd));
10657 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
10658 
10659 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
10660 	if (ret) {
10661 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
10662 			    ret);
10663 		dev_kfree_skb(skb);
10664 		return ret;
10665 	}
10666 
10667 	return 0;
10668 }
10669 
10670 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
10671 {
10672 	struct wmi_mlo_teardown_cmd *cmd;
10673 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10674 	struct sk_buff *skb;
10675 	int ret, len;
10676 
10677 	len = sizeof(*cmd);
10678 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10679 	if (!skb)
10680 		return -ENOMEM;
10681 
10682 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
10683 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
10684 						 sizeof(*cmd));
10685 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);
10687 
10688 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
10689 	if (ret) {
10690 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
10691 			    ret);
10692 		dev_kfree_skb(skb);
10693 		return ret;
10694 	}
10695 
10696 	return 0;
10697 }
10698 
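/* Ordering sketch for the three MLO helpers above (hypothetical; the
 * real sequencing lives in the core/mac code), per participating pdev:
 *
 *	ath12k_wmi_mlo_setup(ar, &mlo_arg);	group id + partner links
 *	ath12k_wmi_mlo_ready(ar);		after all setups succeed
 *	...
 *	ath12k_wmi_mlo_teardown(ar);		on recovery/shutdown
 */
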
10699 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
10700 {
10701 	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
10702 			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
10703 }
10704 
10705 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
10706 				       u32 vdev_id,
10707 				       struct ath12k_reg_tpc_power_info *param)
10708 {
10709 	struct wmi_vdev_set_tpc_power_cmd *cmd;
10710 	struct ath12k_wmi_pdev *wmi = ar->wmi;
10711 	struct wmi_vdev_ch_power_params *ch;
10712 	int i, ret, len, array_len;
10713 	struct sk_buff *skb;
10714 	struct wmi_tlv *tlv;
10715 	u8 *ptr;
10716 
10717 	array_len = sizeof(*ch) * param->num_pwr_levels;
10718 	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
10719 
10720 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
10721 	if (!skb)
10722 		return -ENOMEM;
10723 
10724 	ptr = skb->data;
10725 
10726 	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
10727 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
10728 						 sizeof(*cmd));
10729 	cmd->vdev_id = cpu_to_le32(vdev_id);
10730 	cmd->psd_power = cpu_to_le32(param->is_psd_power);
10731 	cmd->eirp_power = cpu_to_le32(param->eirp_power);
10732 	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
10733 
10734 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10735 		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
10736 		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
10737 
10738 	ptr += sizeof(*cmd);
10739 	tlv = (struct wmi_tlv *)ptr;
10740 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
10741 
10742 	ptr += TLV_HDR_SIZE;
10743 	ch = (struct wmi_vdev_ch_power_params *)ptr;
10744 
10745 	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
10746 		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
10747 							sizeof(*ch));
10748 		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
10749 		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
10750 
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   le32_to_cpu(ch->chan_cfreq), le32_to_cpu(ch->tx_power));
10753 	}
10754 
10755 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
10756 	if (ret) {
10757 		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
10758 		dev_kfree_skb(skb);
10759 		return ret;
10760 	}
10761 
10762 	return 0;
10763 }
10764 
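/* Usage sketch (hypothetical two-level example; units follow whatever
 * ath12k_reg_tpc_power_info carries): one wmi_vdev_ch_power_params entry
 * is emitted per power level, pairing a channel center frequency with
 * its TX power:
 *
 *	struct ath12k_reg_tpc_power_info tpc = {};
 *
 *	tpc.num_pwr_levels = 2;
 *	tpc.chan_power_info[0].chan_cfreq = 5955;
 *	tpc.chan_power_info[0].tx_power = 24;
 *	tpc.chan_power_info[1].chan_cfreq = 5975;
 *	tpc.chan_power_info[1].tx_power = 24;
 *
 *	ret = ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, &tpc);
 */
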
10765 static int
10766 ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
10767 				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
10768 				struct wmi_mlo_link_set_active_arg *arg)
10769 {
10770 	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
10771 	u8 i;
10772 
	if (arg->num_disallow_mode_comb > ARRAY_SIZE(arg->disallow_bmap)) {
10775 		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
10776 			    arg->num_disallow_mode_comb);
10777 		return -EINVAL;
10778 	}
10779 
10780 	dislw_bmap_arg = &arg->disallow_bmap[0];
10781 	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
10782 		dislw_bmap->tlv_header =
10783 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
10784 		dislw_bmap->disallowed_mode_bitmap =
10785 				cpu_to_le32(dislw_bmap_arg->disallowed_mode);
10786 		dislw_bmap->ieee_link_id_comb =
10787 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
10788 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
10789 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
10790 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
10791 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
10792 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
10793 			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
10794 					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);
10795 
10796 		ath12k_dbg(ab, ATH12K_DBG_WMI,
10797 			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
10798 			   i, dislw_bmap_arg->disallowed_mode,
10799 			   dislw_bmap_arg->ieee_link_id_comb);
10800 		dislw_bmap++;
10801 		dislw_bmap_arg++;
10802 	}
10803 
10804 	return 0;
10805 }
10806 
10807 int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
10808 					    struct wmi_mlo_link_set_active_arg *arg)
10809 {
10810 	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
10811 	struct wmi_mlo_set_active_link_number_params *link_num_param;
10812 	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
10813 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
10814 	struct wmi_mlo_link_set_active_cmd *cmd;
10815 	u32 num_inactive_vdev_bitmap = 0;
10816 	u32 num_disallow_mode_comb = 0;
10817 	struct wmi_tlv *tlv;
10818 	struct sk_buff *skb;
10819 	__le32 *vdev_bitmap;
10820 	void *buf_ptr;
10821 	int i, ret;
10822 	u32 len;
10823 
10824 	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
10825 		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
10826 		return -EINVAL;
10827 	}
10828 
10829 	switch (arg->force_mode) {
10830 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
10831 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
10832 		num_link_num_param = arg->num_link_entry;
10833 		fallthrough;
10834 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
10835 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
10836 	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
10837 		num_vdev_bitmap = arg->num_vdev_bitmap;
10838 		break;
10839 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
10840 		num_vdev_bitmap = arg->num_vdev_bitmap;
10841 		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
10842 		break;
10843 	default:
10844 		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
10845 		return -EINVAL;
10846 	}
10847 
10848 	num_disallow_mode_comb = arg->num_disallow_mode_comb;
10849 	len = sizeof(*cmd) +
10850 	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
10851 	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
10852 	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
10853 	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
10854 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
10855 		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10856 
10857 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
10858 	if (!skb)
10859 		return -ENOMEM;
10860 
10861 	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
10862 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
10863 						 sizeof(*cmd));
10864 	cmd->force_mode = cpu_to_le32(arg->force_mode);
10865 	cmd->reason = cpu_to_le32(arg->reason);
10866 	ath12k_dbg(ab, ATH12K_DBG_WMI,
10867 		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
10868 		   arg->force_mode, arg->reason, num_link_num_param,
10869 		   num_vdev_bitmap, num_inactive_vdev_bitmap,
10870 		   num_disallow_mode_comb);
10871 
10872 	buf_ptr = skb->data + sizeof(*cmd);
10873 	tlv = buf_ptr;
10874 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10875 					 sizeof(*link_num_param) * num_link_num_param);
10876 	buf_ptr += TLV_HDR_SIZE;
10877 
10878 	if (num_link_num_param) {
10879 		cmd->ctrl_flags =
10880 			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
10881 					 CRTL_F_DYNC_FORCE_LINK_NUM);
10882 
10883 		link_num_param = buf_ptr;
10884 		for (i = 0; i < num_link_num_param; i++) {
10885 			link_num_param->tlv_header =
10886 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
10887 			link_num_param->num_of_link =
10888 				cpu_to_le32(arg->link_num[i].num_of_link);
10889 			link_num_param->vdev_type =
10890 				cpu_to_le32(arg->link_num[i].vdev_type);
10891 			link_num_param->vdev_subtype =
10892 				cpu_to_le32(arg->link_num[i].vdev_subtype);
10893 			link_num_param->home_freq =
10894 				cpu_to_le32(arg->link_num[i].home_freq);
10895 			ath12k_dbg(ab, ATH12K_DBG_WMI,
10896 				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
10897 				   i, arg->link_num[i].num_of_link,
10898 				   arg->link_num[i].vdev_type,
10899 				   arg->link_num[i].vdev_subtype,
10900 				   arg->link_num[i].home_freq,
10901 				   __le32_to_cpu(cmd->ctrl_flags));
10902 			link_num_param++;
10903 		}
10904 
10905 		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
10906 	}
10907 
10908 	tlv = buf_ptr;
10909 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10910 					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
10911 	buf_ptr += TLV_HDR_SIZE;
10912 
10913 	if (num_vdev_bitmap) {
10914 		vdev_bitmap = buf_ptr;
10915 		for (i = 0; i < num_vdev_bitmap; i++) {
10916 			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
10917 			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
10918 				   i, arg->vdev_bitmap[i]);
10919 		}
10920 
10921 		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
10922 	}
10923 
10924 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
10925 		tlv = buf_ptr;
10926 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
10927 						 sizeof(*vdev_bitmap) *
10928 						 num_inactive_vdev_bitmap);
10929 		buf_ptr += TLV_HDR_SIZE;
10930 
10931 		if (num_inactive_vdev_bitmap) {
10932 			vdev_bitmap = buf_ptr;
10933 			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
10934 				vdev_bitmap[i] =
10935 					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
10936 				ath12k_dbg(ab, ATH12K_DBG_WMI,
10937 					   "entry %d inactive_vdev_id_bitmap 0x%x",
					   i, arg->inactive_vdev_bitmap[i]);
10939 			}
10940 
10941 			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
10942 		}
10943 	} else {
10944 		/* add empty vdev bitmap2 tlv */
10945 		tlv = buf_ptr;
10946 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10947 		buf_ptr += TLV_HDR_SIZE;
10948 	}
10949 
10950 	/* add empty ieee_link_id_bitmap tlv */
10951 	tlv = buf_ptr;
10952 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10953 	buf_ptr += TLV_HDR_SIZE;
10954 
10955 	/* add empty ieee_link_id_bitmap2 tlv */
10956 	tlv = buf_ptr;
10957 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
10958 	buf_ptr += TLV_HDR_SIZE;
10959 
10960 	tlv = buf_ptr;
10961 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10962 					 sizeof(*disallowed_mode_bmap) *
10963 					 arg->num_disallow_mode_comb);
10964 	buf_ptr += TLV_HDR_SIZE;
10965 
10966 	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
10967 	if (ret)
10968 		goto free_skb;
10969 
10970 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
10971 	if (ret) {
10972 		ath12k_warn(ab,
10973 			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
10974 		goto free_skb;
10975 	}
10976 
10977 	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
10978 
10979 	return ret;
10980 
10981 free_skb:
10982 	dev_kfree_skb(skb);
10983 	return ret;
10984 }
10985
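/* TLV layout emitted by ath12k_wmi_send_mlo_link_set_active_cmd(), in
 * order: fixed params, link-number array, vdev bitmap, inactive vdev
 * bitmap (empty unless the force mode is ACTIVE_INACTIVE), two reserved
 * ieee_link_id bitmaps (always empty) and the disallowed-mode
 * combination array.
 */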