// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

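/* Per-tag minimum payload lengths. Variable-length array tags use
 * .min_len = 0; fixed-size event TLVs are validated against their struct
 * size in ath12k_wmi_tlv_iter() before being handed to the event parsers.
 */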
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};

static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
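
/* Note the difference between the two helpers above: commands embed the TLV
 * header inside the command struct, so callers pass the full struct size,
 * e.g. ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD, sizeof(*cmd)), and
 * TLV_HDR_SIZE is subtracted so that WMI_TLV_LEN covers only the payload
 * that follows the header.
 */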

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;

	if (ab->num_radios == 2) {
		config->num_peers = TARGET_NUM_PEERS(DBS);
		config->num_tids = TARGET_NUM_TIDS(DBS);
	} else if (ab->num_radios == 3) {
		config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
		config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
	} else {
		/* Control should not reach here */
		config->num_peers = TARGET_NUM_PEERS(SINGLE);
		config->num_tids = TARGET_NUM_TIDS(SINGLE);
	}
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates that the host supports peer map v3 and peer unmap v2 */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
}

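/* Unlike ath12k_wmi_init_qcn9274() above, where the limits scale with the
 * number of radios, WCN7850 uses small fixed resource limits suited to a
 * station/mobile chipset.
 */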
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
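
/* For example, PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *
 *	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,
 *
 * mapping each hw mode to its priority value; a lower _PRI value is assumed
 * to indicate a more preferred mode when firmware advertises several.
 */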

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
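
/* The iterator above walks a flat stream of [wmi_tlv header][payload]
 * elements:
 *
 *	| tag0 len0 | payload0 ... | tag1 len1 | payload1 ... |
 *
 * For each element, iter() is called with ptr pointing just past the header
 * and len set to the payload length taken from the header.
 */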

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ab, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ab, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}
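
/* Typical usage of the allocating parser, as a sketch with most error
 * handling elided:
 *
 *	const void **tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *	const struct wmi_vdev_stopped_event *ev;
 *
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];	(NULL if the TLV was absent)
 *	...
 *	kfree(tb);
 */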

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}
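
/* ath12k_wmi_cmd_send() may sleep (note the might_sleep() above): while HTC
 * is out of tx credits, ath12k_wmi_cmd_send_nowait() keeps returning -EAGAIN
 * and the wait_event_timeout() condition is re-evaluated each time
 * tx_credits_wq is woken, until the send succeeds, a firmware crash turns
 * the error into -ESHUTDOWN, or WMI_SEND_TIMEOUT_HZ expires.
 */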
430 
431 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
432 				     const void *ptr,
433 				     struct ath12k_wmi_service_ext_arg *arg)
434 {
435 	const struct wmi_service_ready_ext_event *ev = ptr;
436 	int i;
437 
438 	if (!ev)
439 		return -EINVAL;
440 
441 	/* Move this to host based bitmap */
442 	arg->default_conc_scan_config_bits =
443 		le32_to_cpu(ev->default_conc_scan_config_bits);
444 	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
445 	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
446 	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
447 	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
448 	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
449 	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
450 
451 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
452 		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
453 			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
454 
455 	return 0;
456 }

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
	 * between bands for a single radio, it is not yet clear how that
	 * should be handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by firmware depends on the actual
	 * hardware chains used. For example, for 4x4-capable macphys, the
	 * first 4 chains can be used for the first mac and the remaining 4
	 * chains for the second mac, or vice versa. In that case tx/rx
	 * chainmask 0xf is advertised for the first mac and 0xf0 for the
	 * second mac, or vice versa. Compute the shift value of the tx/rx
	 * chainmask, which is used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed due to NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the WMI service bitmap into a linear bitmap. The WMI services in the
 * service ready event are advertised in b0-b3 (the 4 LSBs) of each 4-byte
 * word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
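
/* Each 32-bit word therefore carries WMI_SERVICE_BITS_IN_SIZE32 services:
 * service j is advertised as bit (j % WMI_SERVICE_BITS_IN_SIZE32) of word
 * (j / WMI_SERVICE_BITS_IN_SIZE32), so with 4 bits per word service 5 is
 * BIT(1) of wmi_svc_bm[1] and lands on bit 5 of the linear svc_map.
 */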

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
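
/* The command helpers below all follow the same build-and-send pattern;
 * a minimal sketch (wmi_foo_cmd, WMI_TAG_FOO_CMD and WMI_FOO_CMDID are
 * hypothetical placeholders, not identifiers from this driver):
 *
 *	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 *	cmd = (struct wmi_foo_cmd *)skb->data;
 *	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FOO_CMD, sizeof(*cmd));
 *	cmd->some_field = cpu_to_le32(val);
 *	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FOO_CMDID);
 *	if (ret)
 *		dev_kfree_skb(skb);
 *
 * On failure the skb is not consumed, hence the dev_kfree_skb() in every
 * caller.
 */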

int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = 0;
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* This can be optimized by sending the tx/rx chain configuration
	 * only for the supported bands instead of always sending it for
	 * both bands.
	 */
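	/*
	 * Buffer layout: [wmi_vdev_create_cmd][WMI_TAG_ARRAY_STRUCT tlv],
	 * then one ath12k_wmi_vdev_txrx_streams_params entry per supported
	 * band (2 GHz first, then 5 GHz).
	 */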
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams =
				 args->chains[NL80211_BAND_2GHZ].tx;
	txrx_streams->supported_rx_streams =
				 args->chains[NL80211_BAND_2GHZ].rx;

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams =
				 args->chains[NL80211_BAND_5GHZ].tx;
	txrx_streams->supported_rx_streams =
				 args->chains[NL80211_BAND_5GHZ].rx;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

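/* Encode a wmi_vdev_start_req_arg into the WMI channel TLV. As an
 * illustrative example (values assumed, not taken from this file): an HT40+
 * channel with primary frequency 2437 MHz would set mhz = 2437,
 * band_center_freq1 = 2447 and WMI_CHAN_INFO_HT40_PLUS in info.
 */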
952 
953 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
954 				       struct wmi_vdev_start_req_arg *arg)
955 {
956 	memset(chan, 0, sizeof(*chan));
957 
958 	chan->mhz = cpu_to_le32(arg->freq);
959 	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
960 	if (arg->mode == MODE_11AC_VHT80_80)
961 		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
962 	else
963 		chan->band_center_freq2 = 0;
964 
965 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
966 	if (arg->passive)
967 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
968 	if (arg->allow_ibss)
969 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
970 	if (arg->allow_ht)
971 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
972 	if (arg->allow_vht)
973 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
974 	if (arg->allow_he)
975 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
976 	if (arg->ht40plus)
977 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
978 	if (arg->chan_radar)
979 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
980 	if (arg->freq2_radar)
981 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
982 
983 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
984 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
985 		le32_encode_bits(arg->max_reg_power,
986 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
987 
988 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
989 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
990 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
991 }
992 
993 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
994 			  bool restart)
995 {
996 	struct ath12k_wmi_pdev *wmi = ar->wmi;
997 	struct wmi_vdev_start_request_cmd *cmd;
998 	struct sk_buff *skb;
999 	struct ath12k_wmi_channel_params *chan;
1000 	struct wmi_tlv *tlv;
1001 	void *ptr;
1002 	int ret, len;
1003 
1004 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1005 		return -EINVAL;
1006 
1007 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
1008 
1009 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1010 	if (!skb)
1011 		return -ENOMEM;
1012 
1013 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1014 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
1015 						 sizeof(*cmd));
1016 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1017 	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
1018 	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
1019 	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
1020 	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
1021 	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
1022 	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
1023 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
1024 	cmd->regdomain = cpu_to_le32(arg->regdomain);
1025 	cmd->he_ops = cpu_to_le32(arg->he_ops);
1026 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
1027 
1028 	if (!restart) {
1029 		if (arg->ssid) {
1030 			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
1031 			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1032 		}
1033 		if (arg->hidden_ssid)
1034 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
1035 		if (arg->pmf_enabled)
1036 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
1037 	}
1038 
1039 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
1040 
1041 	ptr = skb->data + sizeof(*cmd);
1042 	chan = ptr;
1043 
1044 	ath12k_wmi_put_wmi_channel(chan, arg);
1045 
1046 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
1047 						  sizeof(*chan));
1048 	ptr += sizeof(*chan);
1049 
1050 	tlv = ptr;
1051 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
1052 
1053 	/* Note: This is a nested TLV containing:
1054 	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
1055 	 */
1056 
1057 	ptr += sizeof(*tlv);
1058 
1059 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
1060 		   restart ? "restart" : "start", arg->vdev_id,
1061 		   arg->freq, arg->mode);
1062 
1063 	if (restart)
1064 		ret = ath12k_wmi_cmd_send(wmi, skb,
1065 					  WMI_VDEV_RESTART_REQUEST_CMDID);
1066 	else
1067 		ret = ath12k_wmi_cmd_send(wmi, skb,
1068 					  WMI_VDEV_START_REQUEST_CMDID);
1069 	if (ret) {
1070 		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
1071 			    restart ? "restart" : "start");
1072 		dev_kfree_skb(skb);
1073 	}
1074 
1075 	return ret;
1076 }

int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(aid);

	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM\n",
		   arg->vdev_id, arg->peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d tid_map %d\n", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* TODO: FW support for this command is not available yet.
 * It can be tested once the command and the corresponding
 * event are implemented in FW.
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = cpu_to_le32(arg->param);
	cmd->value = cpu_to_le32(arg->value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1617 
1618 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1619 				  u32 param_id, u32 param_value)
1620 {
1621 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1622 	struct wmi_vdev_set_param_cmd *cmd;
1623 	struct sk_buff *skb;
1624 	int ret;
1625 
1626 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1627 	if (!skb)
1628 		return -ENOMEM;
1629 
1630 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1631 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1632 						 sizeof(*cmd));
1633 
1634 	cmd->vdev_id = cpu_to_le32(vdev_id);
1635 	cmd->param_id = cpu_to_le32(param_id);
1636 	cmd->param_value = cpu_to_le32(param_value);
1637 
1638 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1639 		   "WMI vdev id 0x%x set param %d value %d\n",
1640 		   vdev_id, param_id, param_value);
1641 
1642 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1643 	if (ret) {
1644 		ath12k_warn(ar->ab,
1645 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1646 		dev_kfree_skb(skb);
1647 	}
1648 
1649 	return ret;
1650 }
1651 
1652 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1653 {
1654 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1655 	struct wmi_get_pdev_temperature_cmd *cmd;
1656 	struct sk_buff *skb;
1657 	int ret;
1658 
1659 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1660 	if (!skb)
1661 		return -ENOMEM;
1662 
1663 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1664 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1665 						 sizeof(*cmd));
1666 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1667 
1668 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1669 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1670 
1671 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1672 	if (ret) {
1673 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1674 		dev_kfree_skb(skb);
1675 	}
1676 
1677 	return ret;
1678 }
1679 
1680 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1681 					    u32 vdev_id, u32 bcn_ctrl_op)
1682 {
1683 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1684 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1685 	struct sk_buff *skb;
1686 	int ret;
1687 
1688 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1689 	if (!skb)
1690 		return -ENOMEM;
1691 
1692 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1693 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1694 						 sizeof(*cmd));
1695 
1696 	cmd->vdev_id = cpu_to_le32(vdev_id);
1697 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1698 
1699 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1700 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1701 		   vdev_id, bcn_ctrl_op);
1702 
1703 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1704 	if (ret) {
1705 		ath12k_warn(ar->ab,
1706 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1707 		dev_kfree_skb(skb);
1708 	}
1709 
1710 	return ret;
1711 }
1712 
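/* Install a beacon template for the given vdev. The message is laid out
 * as the fixed-params TLV, a (currently zeroed) bcn_prb_info TLV and a
 * byte-array TLV carrying the beacon itself padded to a 4-byte boundary,
 * while cmd->buf_len keeps the unpadded beacon length.
 */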
1713 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
1714 			struct ieee80211_mutable_offsets *offs,
1715 			struct sk_buff *bcn)
1716 {
1717 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1718 	struct wmi_bcn_tmpl_cmd *cmd;
1719 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1720 	struct wmi_tlv *tlv;
1721 	struct sk_buff *skb;
1722 	void *ptr;
1723 	int ret, len;
1724 	size_t aligned_len = roundup(bcn->len, 4);
1725 
1726 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1727 
1728 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1729 	if (!skb)
1730 		return -ENOMEM;
1731 
1732 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1733 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1734 						 sizeof(*cmd));
1735 	cmd->vdev_id = cpu_to_le32(vdev_id);
1736 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1737 	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
1738 	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
1739 	cmd->buf_len = cpu_to_le32(bcn->len);
1740 
1741 	ptr = skb->data + sizeof(*cmd);
1742 
1743 	bcn_prb_info = ptr;
1744 	len = sizeof(*bcn_prb_info);
1745 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1746 							  len);
1747 	bcn_prb_info->caps = 0;
1748 	bcn_prb_info->erp = 0;
1749 
1750 	ptr += sizeof(*bcn_prb_info);
1751 
1752 	tlv = ptr;
1753 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
1754 	memcpy(tlv->value, bcn->data, bcn->len);
1755 
1756 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1757 	if (ret) {
1758 		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1759 		dev_kfree_skb(skb);
1760 	}
1761 
1762 	return ret;
1763 }
1764 
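/* Install a peer key for the given vdev: the fixed-params TLV is
 * followed by a byte-array TLV with the key material padded to a 4-byte
 * boundary, while cmd->key_len keeps the real key length.
 */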
1765 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
1766 				struct wmi_vdev_install_key_arg *arg)
1767 {
1768 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1769 	struct wmi_vdev_install_key_cmd *cmd;
1770 	struct wmi_tlv *tlv;
1771 	struct sk_buff *skb;
1772 	int ret, len, key_len_aligned;
1773 
1774 	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
1775 	 * length is specified in cmd->key_len.
1776 	 */
1777 	key_len_aligned = roundup(arg->key_len, 4);
1778 
1779 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1780 
1781 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1782 	if (!skb)
1783 		return -ENOMEM;
1784 
1785 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1786 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
1787 						 sizeof(*cmd));
1788 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1789 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1790 	cmd->key_idx = cpu_to_le32(arg->key_idx);
1791 	cmd->key_flags = cpu_to_le32(arg->key_flags);
1792 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
1793 	cmd->key_len = cpu_to_le32(arg->key_len);
1794 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
1795 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
1796 
1797 	if (arg->key_rsc_counter)
1798 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
1799 
1800 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1801 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
1802 	memcpy(tlv->value, arg->key_data, arg->key_len);
1803 
1804 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1805 		   "WMI vdev install key idx %d cipher %d len %d\n",
1806 		   arg->key_idx, arg->key_cipher, arg->key_len);
1807 
1808 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1809 	if (ret) {
1810 		ath12k_warn(ar->ab,
1811 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1812 		dev_kfree_skb(skb);
1813 	}
1814 
1815 	return ret;
1816 }
1817 
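/* Translate the host's peer capability bits from the assoc arg into the
 * WMI_PEER_* and WMI_PEER_EXT_* flags of the peer assoc command.
 */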
1818 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
1819 				       struct ath12k_wmi_peer_assoc_arg *arg,
1820 				       bool hw_crypto_disabled)
1821 {
1822 	cmd->peer_flags = 0;
1823 	cmd->peer_flags_ext = 0;
1824 
1825 	if (arg->is_wme_set) {
1826 		if (arg->qos_flag)
1827 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
1828 		if (arg->apsd_flag)
1829 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
1830 		if (arg->ht_flag)
1831 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
1832 		if (arg->bw_40)
1833 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
1834 		if (arg->bw_80)
1835 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
1836 		if (arg->bw_160)
1837 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
1838 		if (arg->bw_320)
1839 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
1840 
1841 		/* Typically if STBC is enabled for VHT it should be enabled
1842 		 * for HT as well
1843 		 */
1844 		if (arg->stbc_flag)
1845 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
1846 
1847 		/* Typically if LDPC is enabled for VHT it should be enabled
1848 		 * for HT as well
1849 		 */
1850 		if (arg->ldpc_flag)
1851 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
1852 
1853 		if (arg->static_mimops_flag)
1854 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
1855 		if (arg->dynamic_mimops_flag)
1856 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
1857 		if (arg->spatial_mux_flag)
1858 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
1859 		if (arg->vht_flag)
1860 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
1861 		if (arg->he_flag)
1862 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
1863 		if (arg->twt_requester)
1864 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
1865 		if (arg->twt_responder)
1866 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
1867 		if (arg->eht_flag)
1868 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
1869 	}
1870 
1871 	/* Suppress authorization for all AUTH modes that need 4-way handshake
1872 	 * (during re-association).
1873 	 * Authorization will be done for these modes on key installation.
1874 	 */
1875 	if (arg->auth_flag)
1876 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
1877 	if (arg->need_ptk_4_way) {
1878 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
1879 		if (!hw_crypto_disabled)
1880 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
1881 	}
1882 	if (arg->need_gtk_2_way)
1883 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
1884 	/* safe mode bypass the 4-way handshake */
1885 	if (arg->safe_mode_enabled)
1886 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
1887 						 WMI_PEER_NEED_GTK_2_WAY));
1888 
1889 	if (arg->is_pmf_enabled)
1890 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
1891 
1892 	/* Disable AMSDU for station transmit, if user configures it */
1893 	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
1894 	 * it
1895 	 * if (arg->amsdu_disable) Add after FW support
1896 	 */
1897 
1898 	/* Target asserts if node is marked HT and all MCS is set to 0.
1899 	 * Mark the node as non-HT if all the mcs rates are disabled through
1900 	 * iwpriv
1901 	 */
1902 	if (arg->peer_ht_rates.num_rates == 0)
1903 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
1904 }
1905 
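/* The peer assoc command carries, after the fixed-params TLV and in this
 * order: byte arrays for the legacy and HT rate sets, the VHT rate-set
 * struct, an array of HE rate-set structs, a zero-length array (MLO
 * header), an array of EHT rate-set structs and a final zero-length
 * array for ML partner links. The order presumably has to match what
 * the firmware TLV parser expects.
 */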
1906 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
1907 				   struct ath12k_wmi_peer_assoc_arg *arg)
1908 {
1909 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1910 	struct wmi_peer_assoc_complete_cmd *cmd;
1911 	struct ath12k_wmi_vht_rate_set_params *mcs;
1912 	struct ath12k_wmi_he_rate_set_params *he_mcs;
1913 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
1914 	struct sk_buff *skb;
1915 	struct wmi_tlv *tlv;
1916 	void *ptr;
1917 	u32 peer_legacy_rates_align;
1918 	u32 peer_ht_rates_align;
1919 	int i, ret, len;
1920 
1921 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
1922 					  sizeof(u32));
1923 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
1924 				      sizeof(u32));
1925 
1926 	len = sizeof(*cmd) +
1927 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
1928 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
1929 	      sizeof(*mcs) + TLV_HDR_SIZE +
1930 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
1931 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
1932 	      TLV_HDR_SIZE + TLV_HDR_SIZE;
1933 
1934 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1935 	if (!skb)
1936 		return -ENOMEM;
1937 
1938 	ptr = skb->data;
1939 
1940 	cmd = ptr;
1941 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
1942 						 sizeof(*cmd));
1943 
1944 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1945 
1946 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
1947 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
1948 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
1949 
1950 	ath12k_wmi_copy_peer_flags(cmd, arg,
1951 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
1952 					    &ar->ab->dev_flags));
1953 
1954 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
1955 
1956 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
1957 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
1958 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
1959 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
1960 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
1961 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
1962 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
1963 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
1964 
1965 	/* Update 11ax capabilities */
1966 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
1967 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
1968 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
1969 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
1970 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
1971 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
1972 		cmd->peer_he_cap_phy[i] =
1973 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
1974 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
1975 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
1976 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
1977 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
1978 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
1979 
1980 	/* Update 11be capabilities */
1981 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
1982 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
1983 		       0);
1984 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
1985 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
1986 		       0);
1987 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
1988 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
1989 
1990 	/* Update peer legacy rate information */
1991 	ptr += sizeof(*cmd);
1992 
1993 	tlv = ptr;
1994 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
1995 
1996 	ptr += TLV_HDR_SIZE;
1997 
1998 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
1999 	memcpy(ptr, arg->peer_legacy_rates.rates,
2000 	       arg->peer_legacy_rates.num_rates);
2001 
2002 	/* Update peer HT rate information */
2003 	ptr += peer_legacy_rates_align;
2004 
2005 	tlv = ptr;
2006 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2007 	ptr += TLV_HDR_SIZE;
2008 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2009 	memcpy(ptr, arg->peer_ht_rates.rates,
2010 	       arg->peer_ht_rates.num_rates);
2011 
2012 	/* VHT Rates */
2013 	ptr += peer_ht_rates_align;
2014 
2015 	mcs = ptr;
2016 
2017 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2018 						 sizeof(*mcs));
2019 
2020 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2021 
2022 	/* Update bandwidth-NSS mapping */
2023 	cmd->peer_bw_rxnss_override = 0;
2024 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2025 
2026 	if (arg->vht_capable) {
2027 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2028 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2029 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2030 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2031 	}
2032 
2033 	/* HE Rates */
2034 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2035 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2036 
2037 	ptr += sizeof(*mcs);
2038 
2039 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2040 
2041 	tlv = ptr;
2042 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2043 	ptr += TLV_HDR_SIZE;
2044 
2045 	/* Loop through the HE rate set */
2046 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2047 		he_mcs = ptr;
2048 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2049 							    sizeof(*he_mcs));
2050 
2051 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2052 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2053 		ptr += sizeof(*he_mcs);
2054 	}
2055 
2056 	/* MLO header tag with 0 length */
2057 	len = 0;
2058 	tlv = ptr;
2059 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2060 	ptr += TLV_HDR_SIZE;
2061 
2062 	/* Loop through the EHT rate set */
2063 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2064 	tlv = ptr;
2065 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2066 	ptr += TLV_HDR_SIZE;
2067 
2068 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2069 		eht_mcs = ptr;
2070 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2071 							     sizeof(*eht_mcs));
2072 
2073 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2074 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2075 		ptr += sizeof(*eht_mcs);
2076 	}
2077 
2078 	/* ML partner links tag with 0 length */
2079 	len = 0;
2080 	tlv = ptr;
2081 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2082 	ptr += TLV_HDR_SIZE;
2083 
2084 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2085 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2086 		   le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->peer_associd),
2087 		   arg->peer_mac, le32_to_cpu(cmd->peer_flags),
2088 		   le32_to_cpu(cmd->peer_rate_caps), le32_to_cpu(cmd->peer_caps),
2089 		   le32_to_cpu(cmd->peer_listen_intval), le32_to_cpu(cmd->peer_ht_caps),
2090 		   le32_to_cpu(cmd->peer_max_mpdu), le32_to_cpu(cmd->peer_nss),
2091 		   le32_to_cpu(cmd->peer_phymode), le32_to_cpu(cmd->peer_mpdu_density),
2092 		   le32_to_cpu(cmd->peer_vht_caps), le32_to_cpu(cmd->peer_he_cap_info),
2093 		   le32_to_cpu(cmd->peer_he_ops), le32_to_cpu(cmd->peer_he_cap_info_ext),
2094 		   le32_to_cpu(cmd->peer_he_cap_phy[0]), le32_to_cpu(cmd->peer_he_cap_phy[1]),
2095 		   le32_to_cpu(cmd->peer_he_cap_phy[2]), le32_to_cpu(cmd->peer_bw_rxnss_override),
2096 		   le32_to_cpu(cmd->peer_flags_ext), le32_to_cpu(cmd->peer_eht_cap_mac[0]),
2097 		   le32_to_cpu(cmd->peer_eht_cap_mac[1]), le32_to_cpu(cmd->peer_eht_cap_phy[0]),
2098 		   le32_to_cpu(cmd->peer_eht_cap_phy[1]), le32_to_cpu(cmd->peer_eht_cap_phy[2]));
2099 
2100 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2101 	if (ret) {
2102 		ath12k_warn(ar->ab,
2103 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2104 		dev_kfree_skb(skb);
2105 	}
2106 
2107 	return ret;
2108 }
2109 
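/* Populate a scan request with commonly used defaults (dwell and rest
 * times, scan event mask, broadcast BSSID); callers override individual
 * fields as needed before issuing the scan start command.
 */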
2110 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2111 				struct ath12k_wmi_scan_req_arg *arg)
2112 {
2113 	/* setup commonly used values */
2114 	arg->scan_req_id = 1;
2115 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2116 	arg->dwell_time_active = 50;
2117 	arg->dwell_time_active_2g = 0;
2118 	arg->dwell_time_passive = 150;
2119 	arg->dwell_time_active_6g = 40;
2120 	arg->dwell_time_passive_6g = 30;
2121 	arg->min_rest_time = 50;
2122 	arg->max_rest_time = 500;
2123 	arg->repeat_probe_time = 0;
2124 	arg->probe_spacing_time = 0;
2125 	arg->idle_time = 0;
2126 	arg->max_scan_time = 20000;
2127 	arg->probe_delay = 5;
2128 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2129 				  WMI_SCAN_EVENT_COMPLETED |
2130 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2131 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2132 				  WMI_SCAN_EVENT_DEQUEUED;
2133 	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
2134 	arg->num_bssid = 1;
2135 
2136 	/* Fill bssid_list[0] with the broadcast address, otherwise the
2137 	 * BSSID and RA in the probe request will be all zeros
2138 	 */
2139 	eth_broadcast_addr(arg->bssid_list[0].addr);
2140 }
2141 
2142 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2143 						   struct ath12k_wmi_scan_req_arg *arg)
2144 {
2145 	/* Scan events subscription */
2146 	if (arg->scan_ev_started)
2147 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2148 	if (arg->scan_ev_completed)
2149 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2150 	if (arg->scan_ev_bss_chan)
2151 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2152 	if (arg->scan_ev_foreign_chan)
2153 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2154 	if (arg->scan_ev_dequeued)
2155 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2156 	if (arg->scan_ev_preempted)
2157 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2158 	if (arg->scan_ev_start_failed)
2159 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2160 	if (arg->scan_ev_restarted)
2161 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2162 	if (arg->scan_ev_foreign_chn_exit)
2163 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2164 	if (arg->scan_ev_suspended)
2165 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2166 	if (arg->scan_ev_resumed)
2167 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2168 
2169 	/* Set scan control flags */
2170 	cmd->scan_ctrl_flags = 0;
2171 	if (arg->scan_f_passive)
2172 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2173 	if (arg->scan_f_strict_passive_pch)
2174 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2175 	if (arg->scan_f_promisc_mode)
2176 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2177 	if (arg->scan_f_capture_phy_err)
2178 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2179 	if (arg->scan_f_half_rate)
2180 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2181 	if (arg->scan_f_quarter_rate)
2182 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2183 	if (arg->scan_f_cck_rates)
2184 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2185 	if (arg->scan_f_ofdm_rates)
2186 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2187 	if (arg->scan_f_chan_stat_evnt)
2188 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2189 	if (arg->scan_f_filter_prb_req)
2190 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2191 	if (arg->scan_f_bcast_probe)
2192 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2193 	if (arg->scan_f_offchan_mgmt_tx)
2194 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2195 	if (arg->scan_f_offchan_data_tx)
2196 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2197 	if (arg->scan_f_force_active_dfs_chn)
2198 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2199 	if (arg->scan_f_add_tpc_ie_in_probe)
2200 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2201 	if (arg->scan_f_add_ds_ie_in_probe)
2202 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2203 	if (arg->scan_f_add_spoofed_mac_in_probe)
2204 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2205 	if (arg->scan_f_add_rand_seq_in_probe)
2206 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2207 	if (arg->scan_f_en_ie_whitelist_in_probe)
2208 		cmd->scan_ctrl_flags |=
2209 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2210 
2211 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2212 						 WMI_SCAN_DWELL_MODE_MASK);
2213 }
2214 
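/* Build and send WMI_START_SCAN_CMDID. After the fixed-params TLV the
 * message carries, in order: the channel list, the SSID list, the BSSID
 * list, extra IEs (padded to a 4-byte boundary) and, when present, the
 * short-SSID and BSSID scan hint arrays. Extra IEs that would exceed the
 * maximum WMI message length are dropped rather than truncated.
 */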
2215 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2216 				   struct ath12k_wmi_scan_req_arg *arg)
2217 {
2218 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2219 	struct wmi_start_scan_cmd *cmd;
2220 	struct ath12k_wmi_ssid_params *ssid = NULL;
2221 	struct ath12k_wmi_mac_addr_params *bssid;
2222 	struct sk_buff *skb;
2223 	struct wmi_tlv *tlv;
2224 	void *ptr;
2225 	int i, ret, len;
2226 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2227 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2228 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2229 
2230 	len = sizeof(*cmd);
2231 
2232 	len += TLV_HDR_SIZE;
2233 	if (arg->num_chan)
2234 		len += arg->num_chan * sizeof(u32);
2235 
2236 	len += TLV_HDR_SIZE;
2237 	if (arg->num_ssids)
2238 		len += arg->num_ssids * sizeof(*ssid);
2239 
2240 	len += TLV_HDR_SIZE;
2241 	if (arg->num_bssid)
2242 		len += sizeof(*bssid) * arg->num_bssid;
2243 
2244 	if (arg->num_hint_bssid)
2245 		len += TLV_HDR_SIZE +
2246 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2247 
2248 	if (arg->num_hint_s_ssid)
2249 		len += TLV_HDR_SIZE +
2250 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2251 
2252 	len += TLV_HDR_SIZE;
2253 	if (arg->extraie.len)
2254 		extraie_len_with_pad =
2255 			roundup(arg->extraie.len, sizeof(u32));
2256 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2257 		len += extraie_len_with_pad;
2258 	} else {
2259 		ath12k_warn(ar->ab, "discarding oversized extraie of %d bytes for scan start\n",
2260 			    arg->extraie.len);
2261 		extraie_len_with_pad = 0;
2262 	}
2263 
2264 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2265 	if (!skb)
2266 		return -ENOMEM;
2267 
2268 	ptr = skb->data;
2269 
2270 	cmd = ptr;
2271 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2272 						 sizeof(*cmd));
2273 
2274 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2275 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2276 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2277 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2278 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2279 
2280 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2281 
2282 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2283 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2284 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2285 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2286 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2287 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2288 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2289 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2290 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2291 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2292 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2293 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2294 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2295 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2296 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2297 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2298 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2299 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2300 
2301 	ptr += sizeof(*cmd);
2302 
2303 	len = arg->num_chan * sizeof(u32);
2304 
2305 	tlv = ptr;
2306 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2307 	ptr += TLV_HDR_SIZE;
2308 	tmp_ptr = (u32 *)ptr;
2309 
2310 	memcpy(tmp_ptr, arg->chan_list, len);
2311 
2312 	ptr += len;
2313 
2314 	len = arg->num_ssids * sizeof(*ssid);
2315 	tlv = ptr;
2316 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2317 
2318 	ptr += TLV_HDR_SIZE;
2319 
2320 	if (arg->num_ssids) {
2321 		ssid = ptr;
2322 		for (i = 0; i < arg->num_ssids; ++i) {
2323 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2324 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2325 			       arg->ssid[i].ssid_len);
2326 			ssid++;
2327 		}
2328 	}
2329 
2330 	ptr += (arg->num_ssids * sizeof(*ssid));
2331 	len = arg->num_bssid * sizeof(*bssid);
2332 	tlv = ptr;
2333 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2334 
2335 	ptr += TLV_HDR_SIZE;
2336 	bssid = ptr;
2337 
2338 	if (arg->num_bssid) {
2339 		for (i = 0; i < arg->num_bssid; ++i) {
2340 			ether_addr_copy(bssid->addr,
2341 					arg->bssid_list[i].addr);
2342 			bssid++;
2343 		}
2344 	}
2345 
2346 	ptr += arg->num_bssid * sizeof(*bssid);
2347 
2348 	len = extraie_len_with_pad;
2349 	tlv = ptr;
2350 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2351 	ptr += TLV_HDR_SIZE;
2352 
2353 	if (extraie_len_with_pad)
2354 		memcpy(ptr, arg->extraie.ptr,
2355 		       arg->extraie.len);
2356 
2357 	ptr += extraie_len_with_pad;
2358 
2359 	if (arg->num_hint_s_ssid) {
2360 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2361 		tlv = ptr;
2362 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2363 		ptr += TLV_HDR_SIZE;
2364 		s_ssid = ptr;
2365 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2366 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2367 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2368 			s_ssid++;
2369 		}
2370 		ptr += len;
2371 	}
2372 
2373 	if (arg->num_hint_bssid) {
2374 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2375 		tlv = ptr;
2376 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2377 		ptr += TLV_HDR_SIZE;
2378 		hint_bssid = ptr;
2379 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2380 			hint_bssid->freq_flags =
2381 				arg->hint_bssid[i].freq_flags;
2382 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2383 					&arg->hint_bssid[i].bssid.addr[0]);
2384 			hint_bssid++;
2385 		}
2386 	}
2387 
2388 	ret = ath12k_wmi_cmd_send(wmi, skb,
2389 				  WMI_START_SCAN_CMDID);
2390 	if (ret) {
2391 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2392 		dev_kfree_skb(skb);
2393 	}
2394 
2395 	return ret;
2396 }
2397 
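/* Cancel scans: either all scans on the pdev, all scans on a vdev, or a
 * single scan matching scan_id, depending on the requested cancel type.
 */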
2398 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2399 				  struct ath12k_wmi_scan_cancel_arg *arg)
2400 {
2401 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2402 	struct wmi_stop_scan_cmd *cmd;
2403 	struct sk_buff *skb;
2404 	int ret;
2405 
2406 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2407 	if (!skb)
2408 		return -ENOMEM;
2409 
2410 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2411 
2412 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2413 						 sizeof(*cmd));
2414 
2415 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2416 	cmd->requestor = cpu_to_le32(arg->requester);
2417 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2418 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2419 	/* stop the scan with the corresponding scan_id */
2420 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2421 		/* Cancelling all scans */
2422 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2423 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2424 		/* Cancelling VAP scans */
2425 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2426 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2427 		/* Cancelling specific scan */
2428 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2429 	} else {
2430 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2431 			    arg->req_type);
2432 		dev_kfree_skb(skb);
2433 		return -EINVAL;
2434 	}
2435 
2436 	ret = ath12k_wmi_cmd_send(wmi, skb,
2437 				  WMI_STOP_SCAN_CMDID);
2438 	if (ret) {
2439 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2440 		dev_kfree_skb(skb);
2441 	}
2442 
2443 	return ret;
2444 }
2445 
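/* The full channel list may not fit in a single WMI message, so it is
 * sent in chunks: each pass fits as many channel TLVs as max_msg_len
 * allows, and every chunk after the first sets
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so that the firmware appends to,
 * rather than replaces, the list sent so far.
 */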
2446 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2447 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2448 {
2449 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2450 	struct wmi_scan_chan_list_cmd *cmd;
2451 	struct sk_buff *skb;
2452 	struct ath12k_wmi_channel_params *chan_info;
2453 	struct ath12k_wmi_channel_arg *channel_arg;
2454 	struct wmi_tlv *tlv;
2455 	void *ptr;
2456 	int i, ret, len;
2457 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2458 	__le32 *reg1, *reg2;
2459 
2460 	channel_arg = &arg->channel[0];
2461 	while (arg->nallchans) {
2462 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2463 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2464 			sizeof(*chan_info);
2465 
2466 		num_send_chans = min(arg->nallchans, max_chan_limit);
2467 
2468 		arg->nallchans -= num_send_chans;
2469 		len += sizeof(*chan_info) * num_send_chans;
2470 
2471 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2472 		if (!skb)
2473 			return -ENOMEM;
2474 
2475 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2476 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2477 							 sizeof(*cmd));
2478 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2479 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2480 		if (num_sends)
2481 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2482 
2483 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2484 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2485 			   num_send_chans, len, arg->pdev_id, num_sends);
2486 
2487 		ptr = skb->data + sizeof(*cmd);
2488 
2489 		len = sizeof(*chan_info) * num_send_chans;
2490 		tlv = ptr;
2491 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2492 						     len);
2493 		ptr += TLV_HDR_SIZE;
2494 
2495 		for (i = 0; i < num_send_chans; ++i) {
2496 			chan_info = ptr;
2497 			memset(chan_info, 0, sizeof(*chan_info));
2498 			len = sizeof(*chan_info);
2499 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2500 								       len);
2501 
2502 			reg1 = &chan_info->reg_info_1;
2503 			reg2 = &chan_info->reg_info_2;
2504 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2505 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2506 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2507 
2508 			if (channel_arg->is_chan_passive)
2509 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2510 			if (channel_arg->allow_he)
2511 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2512 			else if (channel_arg->allow_vht)
2513 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2514 			else if (channel_arg->allow_ht)
2515 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2516 			if (channel_arg->half_rate)
2517 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2518 			if (channel_arg->quarter_rate)
2519 				chan_info->info |=
2520 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2521 
2522 			if (channel_arg->psc_channel)
2523 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2524 
2525 			if (channel_arg->dfs_set)
2526 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2527 
2528 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2529 							    WMI_CHAN_INFO_MODE);
2530 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2531 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2532 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2533 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2534 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2535 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2536 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2537 						  WMI_CHAN_REG_INFO1_REG_CLS);
2538 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2539 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2540 
2541 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2542 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2543 				   i, channel_arg->mhz, le32_to_cpu(chan_info->info));
2544 
2545 			ptr += sizeof(*chan_info);
2546 
2547 			channel_arg++;
2548 		}
2549 
2550 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2551 		if (ret) {
2552 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2553 			dev_kfree_skb(skb);
2554 			return ret;
2555 		}
2556 
2557 		num_sends++;
2558 	}
2559 
2560 	return 0;
2561 }
2562 
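/* Program per-AC WMM parameters (AIFS, cwmin/cwmax, TXOP, ACM, no-ack)
 * for a vdev; one wmi_wmm_params TLV is filled in for each of the four
 * access categories.
 */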
2563 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2564 				   struct wmi_wmm_params_all_arg *param)
2565 {
2566 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2567 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2568 	struct wmi_wmm_params *wmm_param;
2569 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2570 	struct sk_buff *skb;
2571 	int ret, ac;
2572 
2573 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2574 	if (!skb)
2575 		return -ENOMEM;
2576 
2577 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2578 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2579 						 sizeof(*cmd));
2580 
2581 	cmd->vdev_id = cpu_to_le32(vdev_id);
2582 	cmd->wmm_param_type = 0;
2583 
2584 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2585 		switch (ac) {
2586 		case WME_AC_BE:
2587 			wmi_wmm_arg = &param->ac_be;
2588 			break;
2589 		case WME_AC_BK:
2590 			wmi_wmm_arg = &param->ac_bk;
2591 			break;
2592 		case WME_AC_VI:
2593 			wmi_wmm_arg = &param->ac_vi;
2594 			break;
2595 		case WME_AC_VO:
2596 			wmi_wmm_arg = &param->ac_vo;
2597 			break;
2598 		}
2599 
2600 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2601 		wmm_param->tlv_header =
2602 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2603 					       sizeof(*wmm_param));
2604 
2605 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2606 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2607 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2608 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2609 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2610 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2611 
2612 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2613 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2614 			   ac, wmi_wmm_arg->aifs, wmi_wmm_arg->cwmin,
2615 			   wmi_wmm_arg->cwmax, wmi_wmm_arg->txop,
2616 			   wmi_wmm_arg->acm, wmi_wmm_arg->no_ack);
2617 	}
2618 	ret = ath12k_wmi_cmd_send(wmi, skb,
2619 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2620 	if (ret) {
2621 		ath12k_warn(ar->ab,
2622 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
2623 		dev_kfree_skb(skb);
2624 	}
2625 
2626 	return ret;
2627 }
2628 
2629 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2630 						  u32 pdev_id)
2631 {
2632 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2633 	struct wmi_dfs_phyerr_offload_cmd *cmd;
2634 	struct sk_buff *skb;
2635 	int ret;
2636 
2637 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2638 	if (!skb)
2639 		return -ENOMEM;
2640 
2641 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2642 	cmd->tlv_header =
2643 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2644 				       sizeof(*cmd));
2645 
2646 	cmd->pdev_id = cpu_to_le32(pdev_id);
2647 
2648 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2649 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2650 
2651 	ret = ath12k_wmi_cmd_send(wmi, skb,
2652 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2653 	if (ret) {
2654 		ath12k_warn(ar->ab,
2655 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2656 		dev_kfree_skb(skb);
2657 	}
2658 
2659 	return ret;
2660 }
2661 
2662 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2663 			  u32 tid, u32 initiator, u32 reason)
2664 {
2665 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2666 	struct wmi_delba_send_cmd *cmd;
2667 	struct sk_buff *skb;
2668 	int ret;
2669 
2670 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2671 	if (!skb)
2672 		return -ENOMEM;
2673 
2674 	cmd = (struct wmi_delba_send_cmd *)skb->data;
2675 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
2676 						 sizeof(*cmd));
2677 	cmd->vdev_id = cpu_to_le32(vdev_id);
2678 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2679 	cmd->tid = cpu_to_le32(tid);
2680 	cmd->initiator = cpu_to_le32(initiator);
2681 	cmd->reasoncode = cpu_to_le32(reason);
2682 
2683 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2684 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2685 		   vdev_id, mac, tid, initiator, reason);
2686 
2687 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
2688 
2689 	if (ret) {
2690 		ath12k_warn(ar->ab,
2691 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2692 		dev_kfree_skb(skb);
2693 	}
2694 
2695 	return ret;
2696 }
2697 
2698 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2699 			      u32 tid, u32 status)
2700 {
2701 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2702 	struct wmi_addba_setresponse_cmd *cmd;
2703 	struct sk_buff *skb;
2704 	int ret;
2705 
2706 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2707 	if (!skb)
2708 		return -ENOMEM;
2709 
2710 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
2711 	cmd->tlv_header =
2712 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
2713 				       sizeof(*cmd));
2714 	cmd->vdev_id = cpu_to_le32(vdev_id);
2715 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2716 	cmd->tid = cpu_to_le32(tid);
2717 	cmd->statuscode = cpu_to_le32(status);
2718 
2719 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2720 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2721 		   vdev_id, mac, tid, status);
2722 
2723 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
2724 
2725 	if (ret) {
2726 		ath12k_warn(ar->ab,
2727 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2728 		dev_kfree_skb(skb);
2729 	}
2730 
2731 	return ret;
2732 }
2733 
2734 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2735 			  u32 tid, u32 buf_size)
2736 {
2737 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2738 	struct wmi_addba_send_cmd *cmd;
2739 	struct sk_buff *skb;
2740 	int ret;
2741 
2742 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2743 	if (!skb)
2744 		return -ENOMEM;
2745 
2746 	cmd = (struct wmi_addba_send_cmd *)skb->data;
2747 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
2748 						 sizeof(*cmd));
2749 	cmd->vdev_id = cpu_to_le32(vdev_id);
2750 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2751 	cmd->tid = cpu_to_le32(tid);
2752 	cmd->buffersize = cpu_to_le32(buf_size);
2753 
2754 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2755 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
2756 		   vdev_id, mac, tid, buf_size);
2757 
2758 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
2759 
2760 	if (ret) {
2761 		ath12k_warn(ar->ab,
2762 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
2763 		dev_kfree_skb(skb);
2764 	}
2765 
2766 	return ret;
2767 }
2768 
2769 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
2770 {
2771 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2772 	struct wmi_addba_clear_resp_cmd *cmd;
2773 	struct sk_buff *skb;
2774 	int ret;
2775 
2776 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2777 	if (!skb)
2778 		return -ENOMEM;
2779 
2780 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
2781 	cmd->tlv_header =
2782 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
2783 				       sizeof(*cmd));
2784 	cmd->vdev_id = cpu_to_le32(vdev_id);
2785 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2786 
2787 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2788 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
2789 		   vdev_id, mac);
2790 
2791 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
2792 
2793 	if (ret) {
2794 		ath12k_warn(ar->ab,
2795 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
2796 		dev_kfree_skb(skb);
2797 	}
2798 
2799 	return ret;
2800 }
2801 
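/* Set the initial country for regulatory purposes. The country can be
 * given in one of three forms selected by arg->flags: an alpha2 string,
 * a numeric country code or a regdomain id.
 */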
2802 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
2803 				     struct ath12k_wmi_init_country_arg *arg)
2804 {
2805 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2806 	struct wmi_init_country_cmd *cmd;
2807 	struct sk_buff *skb;
2808 	int ret;
2809 
2810 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2811 	if (!skb)
2812 		return -ENOMEM;
2813 
2814 	cmd = (struct wmi_init_country_cmd *)skb->data;
2815 	cmd->tlv_header =
2816 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
2817 				       sizeof(*cmd));
2818 
2819 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
2820 
2821 	switch (arg->flags) {
2822 	case ALPHA_IS_SET:
2823 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
2824 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
2825 		break;
2826 	case CC_IS_SET:
2827 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
2828 		cmd->cc_info.country_code =
2829 			cpu_to_le32(arg->cc_info.country_code);
2830 		break;
2831 	case REGDMN_IS_SET:
2832 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
2833 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
2834 		break;
2835 	default:
2836 		ret = -EINVAL;
2837 		goto out;
2838 	}
2839 
2840 	ret = ath12k_wmi_cmd_send(wmi, skb,
2841 				  WMI_SET_INIT_COUNTRY_CMDID);
2842 
2843 out:
2844 	if (ret) {
2845 		ath12k_warn(ar->ab,
2846 			    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
2847 			    ret);
2848 		dev_kfree_skb(skb);
2849 	}
2850 
2851 	return ret;
2852 }
2853 
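/* Enable TWT (target wake time) on a pdev using the driver's default
 * congestion/interference thresholds and slot bookkeeping; MBSSID-aware
 * TWT is not supported yet, hence mbss_support stays 0.
 */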
2854 int
2855 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
2856 {
2857 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2858 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2859 	struct wmi_twt_enable_params_cmd *cmd;
2860 	struct sk_buff *skb;
2861 	int ret, len;
2862 
2863 	len = sizeof(*cmd);
2864 
2865 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2866 	if (!skb)
2867 		return -ENOMEM;
2868 
2869 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
2870 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
2871 						 len);
2872 	cmd->pdev_id = cpu_to_le32(pdev_id);
2873 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
2874 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
2875 	cmd->congestion_thresh_setup =
2876 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
2877 	cmd->congestion_thresh_teardown =
2878 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
2879 	cmd->congestion_thresh_critical =
2880 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
2881 	cmd->interference_thresh_teardown =
2882 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
2883 	cmd->interference_thresh_setup =
2884 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
2885 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
2886 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
2887 	cmd->no_of_bcast_mcast_slots =
2888 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
2889 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
2890 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
2891 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
2892 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
2893 	cmd->remove_sta_slot_interval =
2894 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
2895 	/* TODO add MBSSID support */
2896 	cmd->mbss_support = 0;
2897 
2898 	ret = ath12k_wmi_cmd_send(wmi, skb,
2899 				  WMI_TWT_ENABLE_CMDID);
2900 	if (ret) {
2901 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
2902 		dev_kfree_skb(skb);
2903 	}
2904 	return ret;
2905 }
2906 
2907 int
2908 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
2909 {
2910 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2911 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2912 	struct wmi_twt_disable_params_cmd *cmd;
2913 	struct sk_buff *skb;
2914 	int ret, len;
2915 
2916 	len = sizeof(*cmd);
2917 
2918 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2919 	if (!skb)
2920 		return -ENOMEM;
2921 
2922 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
2923 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
2924 						 len);
2925 	cmd->pdev_id = cpu_to_le32(pdev_id);
2926 
2927 	ret = ath12k_wmi_cmd_send(wmi, skb,
2928 				  WMI_TWT_DISABLE_CMDID);
2929 	if (ret) {
2930 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
2931 		dev_kfree_skb(skb);
2932 	}
2933 	return ret;
2934 }
2935 
2936 int
2937 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
2938 			     struct ieee80211_he_obss_pd *he_obss_pd)
2939 {
2940 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2941 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2942 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
2943 	struct sk_buff *skb;
2944 	int ret, len;
2945 
2946 	len = sizeof(*cmd);
2947 
2948 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2949 	if (!skb)
2950 		return -ENOMEM;
2951 
2952 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
2953 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
2954 						 len);
2955 	cmd->vdev_id = cpu_to_le32(vdev_id);
2956 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
2957 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
2958 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
2959 
2960 	ret = ath12k_wmi_cmd_send(wmi, skb,
2961 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
2962 	if (ret) {
2963 		ath12k_warn(ab,
2964 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
2965 		dev_kfree_skb(skb);
2966 	}
2967 	return ret;
2968 }
2969 
2970 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
2971 				  u8 bss_color, u32 period,
2972 				  bool enable)
2973 {
2974 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2975 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2976 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
2977 	struct sk_buff *skb;
2978 	int ret, len;
2979 
2980 	len = sizeof(*cmd);
2981 
2982 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2983 	if (!skb)
2984 		return -ENOMEM;
2985 
2986 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
2987 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
2988 						 len);
2989 	cmd->vdev_id = cpu_to_le32(vdev_id);
2990 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
2991 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
2992 	cmd->current_bss_color = cpu_to_le32(bss_color);
2993 	cmd->detection_period_ms = cpu_to_le32(period);
2994 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
2995 	cmd->free_slot_expiry_time_ms = 0;
2996 	cmd->flags = 0;
2997 
2998 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2999 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3000 		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color,
3001 		   period, ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3002 
3003 	ret = ath12k_wmi_cmd_send(wmi, skb,
3004 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3005 	if (ret) {
3006 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
3007 		dev_kfree_skb(skb);
3008 	}
3009 	return ret;
3010 }
3011 
3012 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3013 						bool enable)
3014 {
3015 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3016 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3017 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3018 	struct sk_buff *skb;
3019 	int ret, len;
3020 
3021 	len = sizeof(*cmd);
3022 
3023 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3024 	if (!skb)
3025 		return -ENOMEM;
3026 
3027 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3028 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3029 						 len);
3030 	cmd->vdev_id = cpu_to_le32(vdev_id);
3031 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3032 
3033 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3034 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3035 		   vdev_id, enable);
3036 
3037 	ret = ath12k_wmi_cmd_send(wmi, skb,
3038 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3039 	if (ret) {
3040 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
3041 		dev_kfree_skb(skb);
3042 	}
3043 	return ret;
3044 }
3045 
3046 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3047 				   struct sk_buff *tmpl)
3048 {
3049 	struct wmi_tlv *tlv;
3050 	struct sk_buff *skb;
3051 	void *ptr;
3052 	int ret, len;
3053 	size_t aligned_len;
3054 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3055 
3056 	aligned_len = roundup(tmpl->len, 4);
3057 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3058 
3059 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3060 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3061 
3062 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3063 	if (!skb)
3064 		return -ENOMEM;
3065 
3066 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3067 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3068 						 sizeof(*cmd));
3069 	cmd->vdev_id = cpu_to_le32(vdev_id);
3070 	cmd->buf_len = cpu_to_le32(tmpl->len);
3071 	ptr = skb->data + sizeof(*cmd);
3072 
3073 	tlv = ptr;
3074 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3075 	memcpy(tlv->value, tmpl->data, tmpl->len);
3076 
3077 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3078 	if (ret) {
3079 		ath12k_warn(ar->ab,
3080 			    "WMI vdev %i failed to send FILS discovery template command\n",
3081 			    vdev_id);
3082 		dev_kfree_skb(skb);
3083 	}
3084 	return ret;
3085 }
3086 
3087 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3088 			       struct sk_buff *tmpl)
3089 {
3090 	struct wmi_probe_tmpl_cmd *cmd;
3091 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3092 	struct wmi_tlv *tlv;
3093 	struct sk_buff *skb;
3094 	void *ptr;
3095 	int ret, len;
3096 	size_t aligned_len = roundup(tmpl->len, 4);
3097 
3098 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3099 		   "WMI vdev %i set probe response template\n", vdev_id);
3100 
3101 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3102 
3103 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3104 	if (!skb)
3105 		return -ENOMEM;
3106 
3107 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3108 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3109 						 sizeof(*cmd));
3110 	cmd->vdev_id = cpu_to_le32(vdev_id);
3111 	cmd->buf_len = cpu_to_le32(tmpl->len);
3112 
3113 	ptr = skb->data + sizeof(*cmd);
3114 
3115 	probe_info = ptr;
3116 	len = sizeof(*probe_info);
3117 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3118 							len);
3119 	probe_info->caps = 0;
3120 	probe_info->erp = 0;
3121 
3122 	ptr += sizeof(*probe_info);
3123 
3124 	tlv = ptr;
3125 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3126 	memcpy(tlv->value, tmpl->data, tmpl->len);
3127 
3128 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3129 	if (ret) {
3130 		ath12k_warn(ar->ab,
3131 			    "WMI vdev %i failed to send probe response template command\n",
3132 			    vdev_id);
3133 		dev_kfree_skb(skb);
3134 	}
3135 	return ret;
3136 }
3137 
3138 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3139 			      bool unsol_bcast_probe_resp_enabled)
3140 {
3141 	struct sk_buff *skb;
3142 	int ret, len;
3143 	struct wmi_fils_discovery_cmd *cmd;
3144 
3145 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3146 		   "WMI vdev %i set %s interval to %u TU\n",
3147 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3148 		   "unsolicited broadcast probe response" : "FILS discovery",
3149 		   interval);
3150 
3151 	len = sizeof(*cmd);
3152 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3153 	if (!skb)
3154 		return -ENOMEM;
3155 
3156 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3157 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3158 						 len);
3159 	cmd->vdev_id = cpu_to_le32(vdev_id);
3160 	cmd->interval = cpu_to_le32(interval);
3161 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3162 
3163 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3164 	if (ret) {
3165 		ath12k_warn(ar->ab,
3166 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3167 			    vdev_id);
3168 		dev_kfree_skb(skb);
3169 	}
3170 	return ret;
3171 }
3172 
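/* Derive each pdev's band-to-mac frequency range from its supported
 * bands and the HAL regulatory capabilities reported by the target.
 */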
3173 static void
3174 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3175 			      struct ath12k_wmi_pdev_band_arg *arg)
3176 {
3177 	u8 i;
3178 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3179 	struct ath12k_pdev *pdev;
3180 
3181 	for (i = 0; i < soc->num_radios; i++) {
3182 		pdev = &soc->pdevs[i];
3183 		hal_reg_cap = &soc->hal_reg_cap[i];
3184 		arg[i].pdev_id = pdev->pdev_id;
3185 
3186 		switch (pdev->cap.supported_bands) {
3187 		case WMI_HOST_WLAN_2G_5G_CAP:
3188 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3189 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3190 			break;
3191 		case WMI_HOST_WLAN_2G_CAP:
3192 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3193 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3194 			break;
3195 		case WMI_HOST_WLAN_5G_CAP:
3196 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3197 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3198 			break;
3199 		default:
3200 			break;
3201 		}
3202 	}
3203 }
3204 
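/* Copy the host resource config into the little-endian WMI structure
 * field by field for the init command.
 */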
3205 static void
3206 ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
3207 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3208 {
3209 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3210 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3211 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3212 	wmi_cfg->num_offload_reorder_buffs =
3213 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3214 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3215 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3216 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3217 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3218 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3219 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3220 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3221 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3222 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3223 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3224 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3225 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3226 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3227 	wmi_cfg->roam_offload_max_ap_profiles =
3228 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3229 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3230 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3231 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3232 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3233 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3234 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3235 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3236 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3237 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3238 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3239 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3240 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3241 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3242 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3243 	wmi_cfg->num_tdls_conn_table_entries =
3244 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3245 	wmi_cfg->beacon_tx_offload_max_vdev =
3246 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3247 	wmi_cfg->num_multicast_filter_entries =
3248 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3249 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3250 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3251 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3252 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3253 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3254 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3255 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3256 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3257 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3258 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3259 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3260 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3261 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3262 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3263 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
3264 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3265 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3266 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3267 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3268 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3269 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3270 }
3271 
3272 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3273 				struct ath12k_wmi_init_cmd_arg *arg)
3274 {
3275 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3276 	struct sk_buff *skb;
3277 	struct wmi_init_cmd *cmd;
3278 	struct ath12k_wmi_resource_config_params *cfg;
3279 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3280 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3281 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3282 	struct wmi_tlv *tlv;
3283 	size_t len;
	int ret;
3284 	void *ptr;
3285 	u32 hw_mode_len = 0;
3286 	u16 idx;
3287 
3288 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3289 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3290 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3291 
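	/* Wire layout: wmi_init_cmd, the resource config TLV, an
	 * array-of-struct TLV of host memory chunks and, when a HW mode
	 * is selected, the set-HW-mode params plus a band-to-MAC array.
	 */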
3292 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3293 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3294 
3295 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3296 	if (!skb)
3297 		return -ENOMEM;
3298 
3299 	cmd = (struct wmi_init_cmd *)skb->data;
3300 
3301 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3302 						 sizeof(*cmd));
3303 
3304 	ptr = skb->data + sizeof(*cmd);
3305 	cfg = ptr;
3306 
3307 	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
3308 
3309 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3310 						 sizeof(*cfg));
3311 
3312 	ptr += sizeof(*cfg);
3313 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3314 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3315 
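	/* Each chunk describes a host DDR region (physical address,
	 * length and the firmware request id it satisfies) that is
	 * handed over to the target at init time.
	 */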
3316 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3317 		host_mem_chunks[idx].tlv_header =
3318 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3319 					   len);
3320 
3321 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3322 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3323 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3324 
3325 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3326 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3327 			   arg->mem_chunks[idx].req_id,
3328 			   (u64)arg->mem_chunks[idx].paddr,
3329 			   arg->mem_chunks[idx].len);
3330 	}
3331 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3332 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3333 
3334 	/* The host mem chunk array TLV header below must be present
	 * even when num_mem_chunks is zero.
	 */
3335 	tlv = ptr;
3336 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3337 	ptr += TLV_HDR_SIZE + len;
3338 
3339 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3340 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3341 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3342 							     sizeof(*hw_mode));
3343 
3344 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3345 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3346 
3347 		ptr += sizeof(*hw_mode);
3348 
3349 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3350 		tlv = ptr;
3351 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3352 
3353 		ptr += TLV_HDR_SIZE;
3354 		len = sizeof(*band_to_mac);
3355 
3356 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3357 			band_to_mac = (void *)ptr;
3358 
3359 			band_to_mac->tlv_header =
3360 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3361 						       len);
3362 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3363 			band_to_mac->start_freq =
3364 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3365 			band_to_mac->end_freq =
3366 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3367 			ptr += sizeof(*band_to_mac);
3368 		}
3369 	}
3370 
3371 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3372 	if (ret) {
3373 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3374 		dev_kfree_skb(skb);
3375 	}
3376 
3377 	return ret;
3378 }
3379 
3380 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3381 			    int pdev_id)
3382 {
3383 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3384 	struct sk_buff *skb;
3385 	int ret;
3386 
3387 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3388 	if (!skb)
3389 		return -ENOMEM;
3390 
3391 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3392 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3393 						 sizeof(*cmd));
3394 
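	/* th_4/th_6 are randomized per-boot seeds for the hash the
	 * target applies to IPv4/IPv6 flows for LRO; any random value
	 * is acceptable here.
	 */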
3395 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3396 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3397 
3398 	cmd->pdev_id = cpu_to_le32(pdev_id);
3399 
3400 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3401 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3402 
3403 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3404 	if (ret) {
3405 		ath12k_warn(ar->ab,
3406 			    "failed to send lro cfg req wmi cmd\n");
3407 		goto err;
3408 	}
3409 
3410 	return 0;
3411 err:
3412 	dev_kfree_skb(skb);
3413 	return ret;
3414 }
3415 
3416 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3417 {
3418 	unsigned long time_left;
3419 
3420 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3421 						WMI_SERVICE_READY_TIMEOUT_HZ);
3422 	if (!time_left)
3423 		return -ETIMEDOUT;
3424 
3425 	return 0;
3426 }
3427 
3428 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3429 {
3430 	unsigned long time_left;
3431 
3432 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3433 						WMI_SERVICE_READY_TIMEOUT_HZ);
3434 	if (!time_left)
3435 		return -ETIMEDOUT;
3436 
3437 	return 0;
3438 }
3439 
3440 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3441 			   enum wmi_host_hw_mode_config_type mode)
3442 {
3443 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3444 	struct sk_buff *skb;
3445 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3446 	int len;
3447 	int ret;
3448 
3449 	len = sizeof(*cmd);
3450 
3451 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3452 	if (!skb)
3453 		return -ENOMEM;
3454 
3455 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3456 
3457 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3458 						 sizeof(*cmd));
3459 
3460 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3461 	cmd->hw_mode_index = cpu_to_le32(mode);
3462 
3463 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3464 	if (ret) {
3465 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3466 		dev_kfree_skb(skb);
3467 	}
3468 
3469 	return ret;
3470 }
3471 
3472 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3473 {
3474 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3475 	struct ath12k_wmi_init_cmd_arg arg = {};
3476 
3477 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
3478 		     ab->wmi_ab.svc_map))
3479 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
3480 
3481 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
3482 
3483 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
3484 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
3485 	arg.mem_chunks = wmi_ab->mem_chunks;
3486 
3487 	if (ab->hw_params->single_pdev_only)
3488 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3489 
3490 	arg.num_band_to_mac = ab->num_radios;
3491 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3492 
3493 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
3494 }
3495 
3496 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3497 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3498 {
3499 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3500 	struct sk_buff *skb;
3501 	int ret;
3502 
3503 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3504 	if (!skb)
3505 		return -ENOMEM;
3506 
3507 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3508 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3509 						 sizeof(*cmd));
3510 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3511 	cmd->scan_count = cpu_to_le32(arg->scan_count);
3512 	cmd->scan_period = cpu_to_le32(arg->scan_period);
3513 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3514 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3515 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3516 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3517 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3518 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3519 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3520 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3521 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3522 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3523 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3524 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3525 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3526 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3527 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3528 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
3529 
3530 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3531 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
3532 		   arg->vdev_id);
3533 
3534 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3535 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
3536 	if (ret) {
3537 		ath12k_warn(ar->ab,
3538 			    "failed to send spectral scan config wmi cmd\n");
3539 		goto err;
3540 	}
3541 
3542 	return 0;
3543 err:
3544 	dev_kfree_skb(skb);
3545 	return ret;
3546 }
3547 
3548 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
3549 				    u32 trigger, u32 enable)
3550 {
3551 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
3552 	struct sk_buff *skb;
3553 	int ret;
3554 
3555 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3556 	if (!skb)
3557 		return -ENOMEM;
3558 
3559 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
3560 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
3561 						 sizeof(*cmd));
3562 
3563 	cmd->vdev_id = cpu_to_le32(vdev_id);
3564 	cmd->trigger_cmd = cpu_to_le32(trigger);
3565 	cmd->enable_cmd = cpu_to_le32(enable);
3566 
3567 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3568 		   "WMI spectral enable cmd vdev id 0x%x\n",
3569 		   vdev_id);
3570 
3571 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3572 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
3573 	if (ret) {
3574 		ath12k_warn(ar->ab,
3575 			    "failed to send spectral enable wmi cmd\n");
3576 		goto err;
3577 	}
3578 
3579 	return 0;
3580 err:
3581 	dev_kfree_skb(skb);
3582 	return ret;
3583 }
3584 
3585 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
3586 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
3587 {
3588 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
3589 	struct sk_buff *skb;
3590 	int ret;
3591 
3592 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3593 	if (!skb)
3594 		return -ENOMEM;
3595 
3596 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
3597 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
3598 						 sizeof(*cmd));
3599 
3600 	cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
3601 	cmd->module_id = cpu_to_le32(arg->module_id);
3602 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
3603 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
3604 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
3605 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
3606 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
3607 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
3608 	cmd->num_elems = cpu_to_le32(arg->num_elems);
3609 	cmd->buf_size = cpu_to_le32(arg->buf_size);
3610 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
3611 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
3612 
3613 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3614 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
3615 		   arg->pdev_id);
3616 
3617 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3618 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
3619 	if (ret) {
3620 		ath12k_warn(ar->ab,
3621 			    "failed to send dma ring cfg req wmi cmd\n");
3622 		goto err;
3623 	}
3624 
3625 	return 0;
3626 err:
3627 	dev_kfree_skb(skb);
3628 	return ret;
3629 }
3630 
3631 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
3632 					  u16 tag, u16 len,
3633 					  const void *ptr, void *data)
3634 {
3635 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3636 
3637 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
3638 		return -EPROTO;
3639 
3640 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
3641 		return -ENOBUFS;
3642 
3643 	arg->num_buf_entry++;
3644 	return 0;
3645 }
3646 
3647 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
3648 					 u16 tag, u16 len,
3649 					 const void *ptr, void *data)
3650 {
3651 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3652 
3653 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
3654 		return -EPROTO;
3655 
3656 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
3657 		return -ENOBUFS;
3658 
3659 	arg->num_meta++;
3660 
3661 	return 0;
3662 }
3663 
3664 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
3665 				    u16 tag, u16 len,
3666 				    const void *ptr, void *data)
3667 {
3668 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3669 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
3670 	u32 pdev_id;
3671 	int ret;
3672 
3673 	switch (tag) {
3674 	case WMI_TAG_DMA_BUF_RELEASE:
3675 		fixed = ptr;
3676 		arg->fixed = *fixed;
3677 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
3678 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
3679 		break;
3680 	case WMI_TAG_ARRAY_STRUCT:
3681 		if (!arg->buf_entry_done) {
3682 			arg->num_buf_entry = 0;
3683 			arg->buf_entry = ptr;
3684 
3685 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3686 						  ath12k_wmi_dma_buf_entry_parse,
3687 						  arg);
3688 			if (ret) {
3689 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
3690 					    ret);
3691 				return ret;
3692 			}
3693 
3694 			arg->buf_entry_done = true;
3695 		} else if (!arg->meta_data_done) {
3696 			arg->num_meta = 0;
3697 			arg->meta_data = ptr;
3698 
3699 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3700 						  ath12k_wmi_dma_buf_meta_parse,
3701 						  arg);
3702 			if (ret) {
3703 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
3704 					    ret);
3705 				return ret;
3706 			}
3707 
3708 			arg->meta_data_done = true;
3709 		}
3710 		break;
3711 	default:
3712 		break;
3713 	}
3714 	return 0;
3715 }
3716 
3717 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
3718 						       struct sk_buff *skb)
3719 {
3720 	struct ath12k_wmi_dma_buf_release_arg arg = {};
3721 	struct ath12k_dbring_buf_release_event param;
3722 	int ret;
3723 
3724 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
3725 				  ath12k_wmi_dma_buf_parse,
3726 				  &arg);
3727 	if (ret) {
3728 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
3729 		return;
3730 	}
3731 
3732 	param.fixed = arg.fixed;
3733 	param.buf_entry = arg.buf_entry;
3734 	param.num_buf_entry = arg.num_buf_entry;
3735 	param.meta_data = arg.meta_data;
3736 	param.num_meta = arg.num_meta;
3737 
3738 	ret = ath12k_dbring_buffer_release_event(ab, &param);
3739 	if (ret) {
3740 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
3741 		return;
3742 	}
3743 }
3744 
3745 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
3746 					 u16 tag, u16 len,
3747 					 const void *ptr, void *data)
3748 {
3749 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3750 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
3751 	u32 phy_map = 0;
3752 
3753 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
3754 		return -EPROTO;
3755 
3756 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
3757 		return -ENOBUFS;
3758 
3759 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
3760 				   hw_mode_id);
3761 	svc_rdy_ext->n_hw_mode_caps++;
3762 
3763 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
3764 	svc_rdy_ext->tot_phy_id += fls(phy_map);
3765 
3766 	return 0;
3767 }
3768 
3769 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
3770 				   u16 len, const void *ptr, void *data)
3771 {
3772 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3773 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
3774 	enum wmi_host_hw_mode_config_type mode, pref;
3775 	u32 i;
3776 	int ret;
3777 
3778 	svc_rdy_ext->n_hw_mode_caps = 0;
3779 	svc_rdy_ext->hw_mode_caps = ptr;
3780 
3781 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3782 				  ath12k_wmi_hw_mode_caps_parse,
3783 				  svc_rdy_ext);
3784 	if (ret) {
3785 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3786 		return ret;
3787 	}
3788 
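	/* Of all modes reported by firmware, keep the one with the best
	 * (numerically lowest) priority in ath12k_hw_mode_pri_map.
	 */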
3789 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
3790 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
3791 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
3792 
3793 		if (mode >= WMI_HOST_HW_MODE_MAX)
3794 			continue;
3795 
3796 		pref = soc->wmi_ab.preferred_hw_mode;
3797 
3798 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
3799 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
3800 			soc->wmi_ab.preferred_hw_mode = mode;
3801 		}
3802 	}
3803 
3804 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
3805 		   soc->wmi_ab.preferred_hw_mode);
3806 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
3807 		return -EINVAL;
3808 
3809 	return 0;
3810 }
3811 
3812 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
3813 					 u16 tag, u16 len,
3814 					 const void *ptr, void *data)
3815 {
3816 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3817 
3818 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
3819 		return -EPROTO;
3820 
3821 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
3822 		return -ENOBUFS;
3823 
3824 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
3825 	if (!svc_rdy_ext->n_mac_phy_caps) {
3826 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
3827 						    GFP_ATOMIC);
3828 		if (!svc_rdy_ext->mac_phy_caps)
3829 			return -ENOMEM;
3830 	}
3831 
3832 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
3833 	svc_rdy_ext->n_mac_phy_caps++;
3834 	return 0;
3835 }
3836 
3837 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
3838 					     u16 tag, u16 len,
3839 					     const void *ptr, void *data)
3840 {
3841 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3842 
3843 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
3844 		return -EPROTO;
3845 
3846 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
3847 		return -ENOBUFS;
3848 
3849 	svc_rdy_ext->n_ext_hal_reg_caps++;
3850 	return 0;
3851 }
3852 
3853 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
3854 				       u16 len, const void *ptr, void *data)
3855 {
3856 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3857 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3858 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
3859 	int ret;
3860 	u32 i;
3861 
3862 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
3863 	svc_rdy_ext->ext_hal_reg_caps = ptr;
3864 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3865 				  ath12k_wmi_ext_hal_reg_caps_parse,
3866 				  svc_rdy_ext);
3867 	if (ret) {
3868 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3869 		return ret;
3870 	}
3871 
3872 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
3873 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
3874 						      svc_rdy_ext->soc_hal_reg_caps,
3875 						      svc_rdy_ext->ext_hal_reg_caps, i,
3876 						      &reg_cap);
3877 		if (ret) {
3878 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
3879 			return ret;
3880 		}
3881 
3882 		if (reg_cap.phy_id >= MAX_RADIOS) {
3883 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
3884 			return -EINVAL;
3885 		}
3886 
3887 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
3888 	}
3889 	return 0;
3890 }
3891 
3892 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
3893 						 u16 len, const void *ptr,
3894 						 void *data)
3895 {
3896 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3897 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3898 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
3899 	u32 phy_id_map;
3900 	int pdev_index = 0;
3901 	int ret;
3902 
3903 	svc_rdy_ext->soc_hal_reg_caps = ptr;
3904 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
3905 
3906 	soc->num_radios = 0;
3907 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
3908 	soc->fw_pdev_count = 0;
3909 
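	/* phy_id_map carries one bit per PHY of the preferred HW mode;
	 * pull the MAC/PHY capability for each set bit into a pdev.
	 */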
3910 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
3911 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
3912 							    svc_rdy_ext,
3913 							    hw_mode_id, soc->num_radios,
3914 							    &soc->pdevs[pdev_index]);
3915 		if (ret) {
3916 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
3917 				    soc->num_radios);
3918 			return ret;
3919 		}
3920 
3921 		soc->num_radios++;
3922 
3923 		/* For single_pdev_only targets, save the mac_phy
3924 		 * capability of every radio in the same pdev.
3925 		 */
3926 		if (soc->hw_params->single_pdev_only)
3927 			pdev_index = 0;
3928 		else
3929 			pdev_index = soc->num_radios;
3930 
3931 		/* TODO: mac_phy_cap prints */
3932 		phy_id_map >>= 1;
3933 	}
3934 
3935 	if (soc->hw_params->single_pdev_only) {
3936 		soc->num_radios = 1;
3937 		soc->pdevs[0].pdev_id = 0;
3938 	}
3939 
3940 	return 0;
3941 }
3942 
3943 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
3944 					  u16 tag, u16 len,
3945 					  const void *ptr, void *data)
3946 {
3947 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
3948 
3949 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
3950 		return -EPROTO;
3951 
3952 	parse->n_dma_ring_caps++;
3953 	return 0;
3954 }
3955 
3956 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
3957 					u32 num_cap)
3958 {
3959 	size_t sz;
3960 	void *ptr;
3961 
3962 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
3963 	ptr = kzalloc(sz, GFP_ATOMIC);
3964 	if (!ptr)
3965 		return -ENOMEM;
3966 
3967 	ab->db_caps = ptr;
3968 	ab->num_db_cap = num_cap;
3969 
3970 	return 0;
3971 }
3972 
3973 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
3974 {
3975 	kfree(ab->db_caps);
3976 	ab->db_caps = NULL;
3977 }
3978 
3979 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
3980 				    u16 len, const void *ptr, void *data)
3981 {
3982 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
3983 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
3984 	struct ath12k_dbring_cap *dir_buff_caps;
3985 	int ret;
3986 	u32 i;
3987 
3988 	dma_caps_parse->n_dma_ring_caps = 0;
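	/* The first pass only counts the capability TLVs so the db cap
	 * array can be sized before the entries are copied out below.
	 */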
3989 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
3990 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3991 				  ath12k_wmi_dma_ring_caps_parse,
3992 				  dma_caps_parse);
3993 	if (ret) {
3994 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
3995 		return ret;
3996 	}
3997 
3998 	if (!dma_caps_parse->n_dma_ring_caps)
3999 		return 0;
4000 
4001 	if (ab->num_db_cap) {
4002 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4003 		return 0;
4004 	}
4005 
4006 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4007 	if (ret)
4008 		return ret;
4009 
4010 	dir_buff_caps = ab->db_caps;
4011 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4012 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4013 			ath12k_warn(ab, "Invalid module id %d\n",
4014 				    le32_to_cpu(dma_caps[i].module_id));
4015 			ret = -EINVAL;
4016 			goto free_dir_buff;
4017 		}
4018 
4019 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4020 		dir_buff_caps[i].pdev_id =
4021 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4022 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4023 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4024 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4025 	}
4026 
4027 	return 0;
4028 
4029 free_dir_buff:
4030 	ath12k_wmi_free_dbring_caps(ab);
4031 	return ret;
4032 }
4033 
4034 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4035 					u16 tag, u16 len,
4036 					const void *ptr, void *data)
4037 {
4038 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4039 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4040 	int ret;
4041 
4042 	switch (tag) {
4043 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4044 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4045 						&svc_rdy_ext->arg);
4046 		if (ret) {
4047 			ath12k_warn(ab, "unable to extract ext params\n");
4048 			return ret;
4049 		}
4050 		break;
4051 
4052 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4053 		svc_rdy_ext->hw_caps = ptr;
4054 		svc_rdy_ext->arg.num_hw_modes =
4055 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4056 		break;
4057 
4058 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4059 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4060 							    svc_rdy_ext);
4061 		if (ret)
4062 			return ret;
4063 		break;
4064 
4065 	case WMI_TAG_ARRAY_STRUCT:
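		/* The array-of-struct TLVs arrive in a fixed order, so
		 * per-array done flags identify which array this one is.
		 */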
4066 		if (!svc_rdy_ext->hw_mode_done) {
4067 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4068 			if (ret)
4069 				return ret;
4070 
4071 			svc_rdy_ext->hw_mode_done = true;
4072 		} else if (!svc_rdy_ext->mac_phy_done) {
4073 			svc_rdy_ext->n_mac_phy_caps = 0;
4074 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4075 						  ath12k_wmi_mac_phy_caps_parse,
4076 						  svc_rdy_ext);
4077 			if (ret) {
4078 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4079 				return ret;
4080 			}
4081 
4082 			svc_rdy_ext->mac_phy_done = true;
4083 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4084 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4085 			if (ret)
4086 				return ret;
4087 
4088 			svc_rdy_ext->ext_hal_reg_done = true;
4089 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4090 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4091 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4092 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4093 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4094 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4095 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4096 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4097 						       &svc_rdy_ext->dma_caps_parse);
4098 			if (ret)
4099 				return ret;
4100 
4101 			svc_rdy_ext->dma_ring_cap_done = true;
4102 		}
4103 		break;
4104 
4105 	default:
4106 		break;
4107 	}
4108 	return 0;
4109 }
4110 
4111 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4112 					  struct sk_buff *skb)
4113 {
4114 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4115 	int ret;
4116 
4117 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4118 				  ath12k_wmi_svc_rdy_ext_parse,
4119 				  &svc_rdy_ext);
4120 	if (ret) {
4121 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4122 		goto err;
4123 	}
4124 
4125 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4126 		complete(&ab->wmi_ab.service_ready);
4127 
4128 	kfree(svc_rdy_ext.mac_phy_caps);
4129 	return 0;
4130 
4131 err:
4132 	kfree(svc_rdy_ext.mac_phy_caps);
	ath12k_wmi_free_dbring_caps(ab);
4133 	return ret;
4134 }
4135 
4136 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4137 				      const void *ptr,
4138 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4139 {
4140 	const struct wmi_service_ready_ext2_event *ev = ptr;
4141 
4142 	if (!ev)
4143 		return -EINVAL;
4144 
4145 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4146 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4147 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4148 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4149 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4150 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4151 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4152 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4153 	return 0;
4154 }
4155 
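/* Copy the firmware-endian EHT MAC/PHY capability words, MCS maps and
 * PPE thresholds into the band capability, preserving any 320 MHz
 * support already recorded for the 6 GHz band.
 */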
4156 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4157 				      const __le32 cap_mac_info[],
4158 				      const __le32 cap_phy_info[],
4159 				      const __le32 supp_mcs[],
4160 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4161 				       __le32 cap_info_internal)
4162 {
4163 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4164 	u32 support_320mhz;
4165 	u8 i;
4166 
4167 	if (band == NL80211_BAND_6GHZ)
4168 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4169 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4170 
4171 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4172 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4173 
4174 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4175 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4176 
4177 	if (band == NL80211_BAND_6GHZ)
4178 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4179 
4180 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4181 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4182 	if (band != NL80211_BAND_2GHZ) {
4183 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4184 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4185 	}
4186 
4187 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4188 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4189 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4190 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4191 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4192 
4193 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4194 }
4195 
4196 static int
4197 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4198 				      const struct ath12k_wmi_caps_ext_params *caps,
4199 				      struct ath12k_pdev *pdev)
4200 {
4201 	struct ath12k_band_cap *cap_band;
4202 	u32 bands, support_320mhz;
4203 	int i;
4204 
4205 	if (ab->hw_params->single_pdev_only) {
4206 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4207 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4208 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4209 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4210 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4211 			return 0;
4212 		}
4213 
4214 		for (i = 0; i < ab->fw_pdev_count; i++) {
4215 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4216 
4217 			if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
4218 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4219 				bands = fw_pdev->supported_bands;
4220 				break;
4221 			}
4222 		}
4223 
4224 		if (i == ab->fw_pdev_count)
4225 			return -EINVAL;
4226 	} else {
4227 		bands = pdev->cap.supported_bands;
4228 	}
4229 
4230 	if (bands & WMI_HOST_WLAN_2G_CAP) {
4231 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4232 					  caps->eht_cap_mac_info_2ghz,
4233 					  caps->eht_cap_phy_info_2ghz,
4234 					  caps->eht_supp_mcs_ext_2ghz,
4235 					  &caps->eht_ppet_2ghz,
4236 					  caps->eht_cap_info_internal);
4237 	}
4238 
4239 	if (bands & WMI_HOST_WLAN_5G_CAP) {
4240 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4241 					  caps->eht_cap_mac_info_5ghz,
4242 					  caps->eht_cap_phy_info_5ghz,
4243 					  caps->eht_supp_mcs_ext_5ghz,
4244 					  &caps->eht_ppet_5ghz,
4245 					  caps->eht_cap_info_internal);
4246 
4247 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4248 					  caps->eht_cap_mac_info_5ghz,
4249 					  caps->eht_cap_phy_info_5ghz,
4250 					  caps->eht_supp_mcs_ext_5ghz,
4251 					  &caps->eht_ppet_5ghz,
4252 					  caps->eht_cap_info_internal);
4253 	}
4254 
4255 	return 0;
4256 }
4257 
4258 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4259 					   u16 len, const void *ptr,
4260 					   void *data)
4261 {
4262 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4263 	int i = 0, ret;
4264 
4265 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4266 		return -EPROTO;
4267 
4268 	if (ab->hw_params->single_pdev_only) {
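		/* Single-pdev targets report caps per HW mode; only the
		 * preferred mode and SINGLE mode (which carries the
		 * 320 MHz info) are of interest here.
		 */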
4269 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4270 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4271 			return 0;
4272 	} else {
4273 		for (i = 0; i < ab->num_radios; i++) {
4274 			if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
4275 				break;
4276 		}
4277 
4278 		if (i == ab->num_radios)
4279 			return -EINVAL;
4280 	}
4281 
4282 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4283 	if (ret) {
4284 		ath12k_warn(ab,
4285 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4286 			    ab->pdevs[i].pdev_id, ret);
4287 		return ret;
4288 	}
4289 
4290 	return 0;
4291 }
4292 
4293 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4294 					 u16 tag, u16 len,
4295 					 const void *ptr, void *data)
4296 {
4297 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4298 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4299 	int ret;
4300 
4301 	switch (tag) {
4302 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4303 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4304 						 &parse->arg);
4305 		if (ret) {
4306 			ath12k_warn(ab,
4307 				    "failed to extract wmi service ready ext2 parameters: %d\n",
4308 				    ret);
4309 			return ret;
4310 		}
4311 		break;
4312 
4313 	case WMI_TAG_ARRAY_STRUCT:
4314 		if (!parse->dma_ring_cap_done) {
4315 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4316 						       &parse->dma_caps_parse);
4317 			if (ret)
4318 				return ret;
4319 
4320 			parse->dma_ring_cap_done = true;
4321 		} else if (!parse->spectral_bin_scaling_done) {
4322 			/* TODO: Place-holder: the spectral bin scaling TLV
4323 			 * precedes WMI_TAG_MAC_PHY_CAPABILITIES_EXT and must
4324 			 * be consumed here even though it is not parsed yet.
4325 			 */
4326 			parse->spectral_bin_scaling_done = true;
4327 		} else if (!parse->mac_phy_caps_ext_done) {
4328 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4329 						  ath12k_wmi_tlv_mac_phy_caps_ext,
4330 						  parse);
4331 			if (ret) {
4332 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4333 					    ret);
4334 				return ret;
4335 			}
4336 
4337 			parse->mac_phy_caps_ext_done = true;
4338 		}
4339 		break;
4340 	default:
4341 		break;
4342 	}
4343 
4344 	return 0;
4345 }
4346 
4347 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4348 					   struct sk_buff *skb)
4349 {
4350 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4351 	int ret;
4352 
4353 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4354 				  ath12k_wmi_svc_rdy_ext2_parse,
4355 				  &svc_rdy_ext2);
4356 	if (ret) {
4357 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4358 		goto err;
4359 	}
4360 
4361 	complete(&ab->wmi_ab.service_ready);
4362 
4363 	return 0;
4364 
4365 err:
4366 	ath12k_wmi_free_dbring_caps(ab);
4367 	return ret;
4368 }
4369 
4370 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
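/* The pull helpers below share one pattern: parse the event into a
 * tag-indexed TLV table via ath12k_wmi_tlv_parse_alloc(), copy out the
 * fields of interest and free the table.
 */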
4371 					   struct wmi_vdev_start_resp_event *vdev_rsp)
4372 {
4373 	const void **tb;
4374 	const struct wmi_vdev_start_resp_event *ev;
4375 	int ret;
4376 
4377 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4378 	if (IS_ERR(tb)) {
4379 		ret = PTR_ERR(tb);
4380 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4381 		return ret;
4382 	}
4383 
4384 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4385 	if (!ev) {
4386 		ath12k_warn(ab, "failed to fetch vdev start resp ev\n");
4387 		kfree(tb);
4388 		return -EPROTO;
4389 	}
4390 
4391 	*vdev_rsp = *ev;
4392 
4393 	kfree(tb);
4394 	return 0;
4395 }
4396 
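/* Unpack the bitfield-packed WMI regulatory rules into the driver's
 * ath12k_reg_rule representation; the caller owns the returned array.
 */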
4397 static struct ath12k_reg_rule
4398 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4399 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4400 {
4401 	struct ath12k_reg_rule *reg_rule_ptr;
4402 	u32 count;
4403 
4404 	reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
4405 			       GFP_ATOMIC);
4406 
4407 	if (!reg_rule_ptr)
4408 		return NULL;
4409 
4410 	for (count = 0; count < num_reg_rules; count++) {
4411 		reg_rule_ptr[count].start_freq =
4412 			le32_get_bits(wmi_reg_rule[count].freq_info,
4413 				      REG_RULE_START_FREQ);
4414 		reg_rule_ptr[count].end_freq =
4415 			le32_get_bits(wmi_reg_rule[count].freq_info,
4416 				      REG_RULE_END_FREQ);
4417 		reg_rule_ptr[count].max_bw =
4418 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4419 				      REG_RULE_MAX_BW);
4420 		reg_rule_ptr[count].reg_power =
4421 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4422 				      REG_RULE_REG_PWR);
4423 		reg_rule_ptr[count].ant_gain =
4424 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4425 				      REG_RULE_ANT_GAIN);
4426 		reg_rule_ptr[count].flags =
4427 			le32_get_bits(wmi_reg_rule[count].flag_info,
4428 				      REG_RULE_FLAGS);
4429 		reg_rule_ptr[count].psd_flag =
4430 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4431 				      REG_RULE_PSD_INFO);
4432 		reg_rule_ptr[count].psd_eirp =
4433 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4434 				      REG_RULE_PSD_EIRP);
4435 	}
4436 
4437 	return reg_rule_ptr;
4438 }
4439 
4440 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4441 						   struct sk_buff *skb,
4442 						   struct ath12k_reg_info *reg_info)
4443 {
4444 	const void **tb;
4445 	const struct wmi_reg_chan_list_cc_ext_event *ev;
4446 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4447 	u32 num_2g_reg_rules, num_5g_reg_rules;
4448 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4449 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4450 	u32 total_reg_rules = 0;
4451 	int ret, i, j;
4452 
4453 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4454 
4455 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4456 	if (IS_ERR(tb)) {
4457 		ret = PTR_ERR(tb);
4458 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4459 		return ret;
4460 	}
4461 
4462 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4463 	if (!ev) {
4464 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4465 		kfree(tb);
4466 		return -EPROTO;
4467 	}
4468 
4469 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4470 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4471 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4472 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4473 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4474 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4475 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4476 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4477 
4478 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4479 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4480 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4481 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4482 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4483 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4484 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4485 	}
4486 
4487 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
4488 	total_reg_rules += num_2g_reg_rules;
4489 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
4490 	total_reg_rules += num_5g_reg_rules;
4491 
4492 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4493 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4494 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4495 		kfree(tb);
4496 		return -EINVAL;
4497 	}
4498 
4499 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4500 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4501 
4502 		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4503 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4504 				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4505 			kfree(tb);
4506 			return -EINVAL;
4507 		}
4508 
4509 		total_reg_rules += num_6g_reg_rules_ap[i];
4510 	}
4511 
4512 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4513 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4514 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4515 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4516 
4517 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4518 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4519 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4520 
4521 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4522 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4523 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4524 
4525 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
4526 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
4527 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
4528 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
4529 				    i);
4530 			kfree(tb);
4531 			return -EINVAL;
4532 		}
4533 	}
4534 
4535 	if (!total_reg_rules) {
4536 		ath12k_warn(ab, "No reg rules available\n");
4537 		kfree(tb);
4538 		return -EINVAL;
4539 	}
4540 
4541 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
4542 
4543 	/* FIXME: Currently FW includes the 6G reg rules in the 5G rule
4544 	 * list as well for country US.
4545 	 * Having the same 6G reg rules in both the 5G and 6G rule lists
4546 	 * makes the intersect check true, so the same rules are shown
4547 	 * multiple times in the iw output. The workaround below avoids
4548 	 * parsing 6G rules from the 5G reg rule list; it can be removed
4549 	 * once FW is updated to drop the 6G reg rules from the 5G rules
4550 	 * list.
4551 	 */
4552 	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
4553 		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
4554 		num_5g_reg_rules = reg_info->num_5g_reg_rules;
4555 	}
4556 
4557 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
4558 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
4559 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
4560 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
4561 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
4562 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
4563 
4564 	switch (le32_to_cpu(ev->status_code)) {
4565 	case WMI_REG_SET_CC_STATUS_PASS:
4566 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
4567 		break;
4568 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
4569 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4570 		break;
4571 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
4572 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4573 		break;
4574 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
4575 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4576 		break;
4577 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
4578 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4579 		break;
4580 	case WMI_REG_SET_CC_STATUS_FAIL:
4581 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4582 		break;
4583 	}
4584 
4585 	reg_info->is_ext_reg_event = true;
4586 
4587 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
4588 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
4589 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
4590 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
4591 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
4592 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
4593 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
4594 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
4595 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
4596 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
4597 
4598 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4599 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4600 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
4601 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4602 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
4603 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4604 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
4605 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4606 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
4607 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
4608 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
4609 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
4610 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
4611 	}
4612 
4613 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4614 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
4615 		   __func__, reg_info->alpha2, reg_info->dfs_region,
4616 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
4617 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
4618 		   reg_info->phybitmap);
4619 
4620 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4621 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
4622 		   num_2g_reg_rules, num_5g_reg_rules);
4623 
4624 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4625 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
4626 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
4627 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
4628 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
4629 
4630 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4631 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4632 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
4633 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
4634 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
4635 
4636 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4637 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4638 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
4639 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
4640 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
4641 
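	/* The reg rule TLVs follow the fixed event fields in order: 2G,
	 * 5G, 6G AP (per power type), then 6G client (per AP type and
	 * client type); walk them with a single moving pointer.
	 */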
4642 	ext_wmi_reg_rule =
4643 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
4644 			+ sizeof(*ev)
4645 			+ sizeof(struct wmi_tlv));
4646 
4647 	if (num_2g_reg_rules) {
4648 		reg_info->reg_rules_2g_ptr =
4649 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
4650 						      ext_wmi_reg_rule);
4651 
4652 		if (!reg_info->reg_rules_2g_ptr) {
4653 			kfree(tb);
4654 			ath12k_warn(ab, "Unable to allocate memory for 2g rules\n");
4655 			return -ENOMEM;
4656 		}
4657 	}
4658 
4659 	if (num_5g_reg_rules) {
4660 		ext_wmi_reg_rule += num_2g_reg_rules;
4661 		reg_info->reg_rules_5g_ptr =
4662 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
4663 						      ext_wmi_reg_rule);
4664 
4665 		if (!reg_info->reg_rules_5g_ptr) {
4666 			kfree(tb);
4667 			ath12k_warn(ab, "Unable to allocate memory for 5g rules\n");
4668 			return -ENOMEM;
4669 		}
4670 	}
4671 
4672 	ext_wmi_reg_rule += num_5g_reg_rules;
4673 
4674 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4675 		reg_info->reg_rules_6g_ap_ptr[i] =
4676 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
4677 						      ext_wmi_reg_rule);
4678 
4679 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
4680 			kfree(tb);
4681 			ath12k_warn(ab, "Unable to allocate memory for 6g ap rules\n");
4682 			return -ENOMEM;
4683 		}
4684 
4685 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
4686 	}
4687 
4688 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
4689 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4690 			reg_info->reg_rules_6g_client_ptr[j][i] =
4691 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
4692 							      ext_wmi_reg_rule);
4693 
4694 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
4695 				kfree(tb);
4696 				ath12k_warn(ab, "Unable to allocate memory for 6g client rules\n");
4697 				return -ENOMEM;
4698 			}
4699 
4700 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
4701 		}
4702 	}
4703 
4704 	reg_info->client_type = le32_to_cpu(ev->client_type);
4705 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
4706 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
4707 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
4708 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
4709 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
4710 		le32_to_cpu(ev->domain_code_6g_ap_sp);
4711 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
4712 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
4713 
4714 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4715 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
4716 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
4717 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
4718 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
4719 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
4720 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
4721 	}
4722 
4723 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
4724 
4725 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
4726 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
4727 
4728 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
4729 
4730 	kfree(tb);
4731 	return 0;
4732 }
4733 
4734 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
4735 					struct wmi_peer_delete_resp_event *peer_del_resp)
4736 {
4737 	const void **tb;
4738 	const struct wmi_peer_delete_resp_event *ev;
4739 	int ret;
4740 
4741 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4742 	if (IS_ERR(tb)) {
4743 		ret = PTR_ERR(tb);
4744 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4745 		return ret;
4746 	}
4747 
4748 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
4749 	if (!ev) {
4750 		ath12k_warn(ab, "failed to fetch peer delete resp ev\n");
4751 		kfree(tb);
4752 		return -EPROTO;
4753 	}
4754 
4755 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
4756 
4757 	peer_del_resp->vdev_id = ev->vdev_id;
4758 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
4759 			ev->peer_macaddr.addr);
4760 
4761 	kfree(tb);
4762 	return 0;
4763 }
4764 
4765 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
4766 					struct sk_buff *skb,
4767 					u32 *vdev_id)
4768 {
4769 	const void **tb;
4770 	const struct wmi_vdev_delete_resp_event *ev;
4771 	int ret;
4772 
4773 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4774 	if (IS_ERR(tb)) {
4775 		ret = PTR_ERR(tb);
4776 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4777 		return ret;
4778 	}
4779 
4780 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
4781 	if (!ev) {
4782 		ath12k_warn(ab, "failed to fetch vdev delete resp ev\n");
4783 		kfree(tb);
4784 		return -EPROTO;
4785 	}
4786 
4787 	*vdev_id = le32_to_cpu(ev->vdev_id);
4788 
4789 	kfree(tb);
4790 	return 0;
4791 }
4792 
4793 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
4794 					struct sk_buff *skb,
4795 					u32 *vdev_id, u32 *tx_status)
4796 {
4797 	const void **tb;
4798 	const struct wmi_bcn_tx_status_event *ev;
4799 	int ret;
4800 
4801 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4802 	if (IS_ERR(tb)) {
4803 		ret = PTR_ERR(tb);
4804 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4805 		return ret;
4806 	}
4807 
4808 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
4809 	if (!ev) {
4810 		ath12k_warn(ab, "failed to fetch bcn tx status ev\n");
4811 		kfree(tb);
4812 		return -EPROTO;
4813 	}
4814 
4815 	*vdev_id = le32_to_cpu(ev->vdev_id);
4816 	*tx_status = le32_to_cpu(ev->tx_status);
4817 
4818 	kfree(tb);
4819 	return 0;
4820 }
4821 
4822 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4823 					      u32 *vdev_id)
4824 {
4825 	const void **tb;
4826 	const struct wmi_vdev_stopped_event *ev;
4827 	int ret;
4828 
4829 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4830 	if (IS_ERR(tb)) {
4831 		ret = PTR_ERR(tb);
4832 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4833 		return ret;
4834 	}
4835 
4836 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
4837 	if (!ev) {
4838 		ath12k_warn(ab, "failed to fetch vdev stop ev\n");
4839 		kfree(tb);
4840 		return -EPROTO;
4841 	}
4842 
4843 	*vdev_id = le32_to_cpu(ev->vdev_id);
4844 
4845 	kfree(tb);
4846 	return 0;
4847 }
4848 
4849 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
4850 					u16 tag, u16 len,
4851 					const void *ptr, void *data)
4852 {
4853 	struct wmi_tlv_mgmt_rx_parse *parse = data;
4854 
4855 	switch (tag) {
4856 	case WMI_TAG_MGMT_RX_HDR:
4857 		parse->fixed = ptr;
4858 		break;
4859 	case WMI_TAG_ARRAY_BYTE:
4860 		if (!parse->frame_buf_done) {
4861 			parse->frame_buf = ptr;
4862 			parse->frame_buf_done = true;
4863 		}
4864 		break;
4865 	}
4866 	return 0;
4867 }
4868 
4869 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
4870 					  struct sk_buff *skb,
4871 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
4872 {
4873 	struct wmi_tlv_mgmt_rx_parse parse = { };
4874 	const struct ath12k_wmi_mgmt_rx_params *ev;
4875 	const u8 *frame;
4876 	int i, ret;
4877 
4878 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4879 				  ath12k_wmi_tlv_mgmt_rx_parse,
4880 				  &parse);
4881 	if (ret) {
4882 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
4883 		return ret;
4884 	}
4885 
4886 	ev = parse.fixed;
4887 	frame = parse.frame_buf;
4888 
4889 	if (!ev || !frame) {
4890 		ath12k_warn(ab, "failed to fetch mgmt rx hdr\n");
4891 		return -EPROTO;
4892 	}
4893 
4894 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
4895 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
4896 	hdr->channel = le32_to_cpu(ev->channel);
4897 	hdr->snr = le32_to_cpu(ev->snr);
4898 	hdr->rate = le32_to_cpu(ev->rate);
4899 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
4900 	hdr->buf_len = le32_to_cpu(ev->buf_len);
4901 	hdr->status = le32_to_cpu(ev->status);
4902 	hdr->flags = le32_to_cpu(ev->flags);
4903 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
4904 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
4905 
4906 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
4907 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
4908 
4909 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
4910 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
4911 		return -EPROTO;
4912 	}
4913 
4914 	/* Re-point the skb at `frame`: trim to zero length, extend the
	 * tail up to the start of the frame, pull that span off as
	 * headroom, then extend again by the reported buf_len.
	 */
4915 	skb_trim(skb, 0);
4916 	skb_put(skb, frame - skb->data);
4917 	skb_pull(skb, frame - skb->data);
4918 	skb_put(skb, hdr->buf_len);
4919 
4920 	return 0;
4921 }
4922 
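/* Complete a WMI management tx: look up the skb stashed in the txmgmt
 * IDR under desc_id, unmap it and report the tx status to mac80211.
 */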
4923 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
4924 				    u32 status)
4925 {
4926 	struct sk_buff *msdu;
4927 	struct ieee80211_tx_info *info;
4928 	struct ath12k_skb_cb *skb_cb;
4929 	int num_mgmt;
4930 
4931 	spin_lock_bh(&ar->txmgmt_idr_lock);
4932 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
4933 
4934 	if (!msdu) {
4935 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
4936 			    desc_id);
4937 		spin_unlock_bh(&ar->txmgmt_idr_lock);
4938 		return -ENOENT;
4939 	}
4940 
4941 	idr_remove(&ar->txmgmt_idr, desc_id);
4942 	spin_unlock_bh(&ar->txmgmt_idr_lock);
4943 
4944 	skb_cb = ATH12K_SKB_CB(msdu);
4945 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4946 
4947 	info = IEEE80211_SKB_CB(msdu);
4948 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
4949 		info->flags |= IEEE80211_TX_STAT_ACK;
4950 
4951 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
4952 
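	/* atomic_dec_if_positive() leaves the counter untouched and returns
	 * a negative value when it is already zero.
	 */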
4953 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
4954 
4955 	/* WARN when we receive this event without any pending mgmt tx */
4956 	if (num_mgmt < 0)
4957 		WARN_ON_ONCE(1);
4958 
4959 	if (!num_mgmt)
4960 		wake_up(&ar->txmgmt_empty_waitq);
4961 
4962 	return 0;
4963 }
4964 
4965 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
4966 					       struct sk_buff *skb,
4967 					       struct wmi_mgmt_tx_compl_event *param)
4968 {
4969 	const void **tb;
4970 	const struct wmi_mgmt_tx_compl_event *ev;
4971 	int ret;
4972 
4973 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4974 	if (IS_ERR(tb)) {
4975 		ret = PTR_ERR(tb);
4976 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4977 		return ret;
4978 	}
4979 
4980 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
4981 	if (!ev) {
4982 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
4983 		kfree(tb);
4984 		return -EPROTO;
4985 	}
4986 
4987 	param->pdev_id = ev->pdev_id;
4988 	param->desc_id = ev->desc_id;
4989 	param->status = ev->status;
4990 
4991 	kfree(tb);
4992 	return 0;
4993 }
4994 
4995 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
4996 {
4997 	lockdep_assert_held(&ar->data_lock);
4998 
4999 	switch (ar->scan.state) {
5000 	case ATH12K_SCAN_IDLE:
5001 	case ATH12K_SCAN_RUNNING:
5002 	case ATH12K_SCAN_ABORTING:
5003 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5004 			    ath12k_scan_state_str(ar->scan.state),
5005 			    ar->scan.state);
5006 		break;
5007 	case ATH12K_SCAN_STARTING:
5008 		ar->scan.state = ATH12K_SCAN_RUNNING;
5009 		complete(&ar->scan.started);
5010 		break;
5011 	}
5012 }
5013 
5014 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5015 {
5016 	lockdep_assert_held(&ar->data_lock);
5017 
5018 	switch (ar->scan.state) {
5019 	case ATH12K_SCAN_IDLE:
5020 	case ATH12K_SCAN_RUNNING:
5021 	case ATH12K_SCAN_ABORTING:
5022 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5023 			    ath12k_scan_state_str(ar->scan.state),
5024 			    ar->scan.state);
5025 		break;
5026 	case ATH12K_SCAN_STARTING:
5027 		complete(&ar->scan.started);
5028 		__ath12k_mac_scan_finish(ar);
5029 		break;
5030 	}
5031 }
5032 
5033 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5034 {
5035 	lockdep_assert_held(&ar->data_lock);
5036 
5037 	switch (ar->scan.state) {
5038 	case ATH12K_SCAN_IDLE:
5039 	case ATH12K_SCAN_STARTING:
5040 		/* One suspected reason scan can be completed while starting is
5041 		 * if firmware fails to deliver all scan events to the host,
5042 		 * e.g. when transport pipe is full. This has been observed
5043 		 * with spectral scan phyerr events starving wmi transport
5044 		 * pipe. In such case the "scan completed" event should be (and
5045 		 * is) ignored by the host as it may be just firmware's scan
5046 		 * state machine recovering.
5047 		 */
5048 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5049 			    ath12k_scan_state_str(ar->scan.state),
5050 			    ar->scan.state);
5051 		break;
5052 	case ATH12K_SCAN_RUNNING:
5053 	case ATH12K_SCAN_ABORTING:
5054 		__ath12k_mac_scan_finish(ar);
5055 		break;
5056 	}
5057 }
5058 
5059 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5060 {
5061 	lockdep_assert_held(&ar->data_lock);
5062 
5063 	switch (ar->scan.state) {
5064 	case ATH12K_SCAN_IDLE:
5065 	case ATH12K_SCAN_STARTING:
5066 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5067 			    ath12k_scan_state_str(ar->scan.state),
5068 			    ar->scan.state);
5069 		break;
5070 	case ATH12K_SCAN_RUNNING:
5071 	case ATH12K_SCAN_ABORTING:
5072 		ar->scan_channel = NULL;
5073 		break;
5074 	}
5075 }
5076 
5077 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5078 {
5079 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5080 
5081 	lockdep_assert_held(&ar->data_lock);
5082 
5083 	switch (ar->scan.state) {
5084 	case ATH12K_SCAN_IDLE:
5085 	case ATH12K_SCAN_STARTING:
5086 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5087 			    ath12k_scan_state_str(ar->scan.state),
5088 			    ar->scan.state);
5089 		break;
5090 	case ATH12K_SCAN_RUNNING:
5091 	case ATH12K_SCAN_ABORTING:
5092 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5093 		break;
5094 	}
5095 }
5096 
5097 static const char *
5098 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5099 			       enum wmi_scan_completion_reason reason)
5100 {
5101 	switch (type) {
5102 	case WMI_SCAN_EVENT_STARTED:
5103 		return "started";
5104 	case WMI_SCAN_EVENT_COMPLETED:
5105 		switch (reason) {
5106 		case WMI_SCAN_REASON_COMPLETED:
5107 			return "completed";
5108 		case WMI_SCAN_REASON_CANCELLED:
5109 			return "completed [cancelled]";
5110 		case WMI_SCAN_REASON_PREEMPTED:
5111 			return "completed [preempted]";
5112 		case WMI_SCAN_REASON_TIMEDOUT:
5113 			return "completed [timedout]";
5114 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5115 			return "completed [internal err]";
5116 		case WMI_SCAN_REASON_MAX:
5117 			break;
5118 		}
5119 		return "completed [unknown]";
5120 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5121 		return "bss channel";
5122 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5123 		return "foreign channel";
5124 	case WMI_SCAN_EVENT_DEQUEUED:
5125 		return "dequeued";
5126 	case WMI_SCAN_EVENT_PREEMPTED:
5127 		return "preempted";
5128 	case WMI_SCAN_EVENT_START_FAILED:
5129 		return "start failed";
5130 	case WMI_SCAN_EVENT_RESTARTED:
5131 		return "restarted";
5132 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5133 		return "foreign channel exit";
5134 	default:
5135 		return "unknown";
5136 	}
5137 }
5138 
5139 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5140 			       struct wmi_scan_event *scan_evt_param)
5141 {
5142 	const void **tb;
5143 	const struct wmi_scan_event *ev;
5144 	int ret;
5145 
5146 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5147 	if (IS_ERR(tb)) {
5148 		ret = PTR_ERR(tb);
5149 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5150 		return ret;
5151 	}
5152 
5153 	ev = tb[WMI_TAG_SCAN_EVENT];
5154 	if (!ev) {
5155 		ath12k_warn(ab, "failed to fetch scan ev");
5156 		kfree(tb);
5157 		return -EPROTO;
5158 	}
5159 
5160 	scan_evt_param->event_type = ev->event_type;
5161 	scan_evt_param->reason = ev->reason;
5162 	scan_evt_param->channel_freq = ev->channel_freq;
5163 	scan_evt_param->scan_req_id = ev->scan_req_id;
5164 	scan_evt_param->scan_id = ev->scan_id;
5165 	scan_evt_param->vdev_id = ev->vdev_id;
5166 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5167 
5168 	kfree(tb);
5169 	return 0;
5170 }
5171 
5172 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5173 					   struct wmi_peer_sta_kickout_arg *arg)
5174 {
5175 	const void **tb;
5176 	const struct wmi_peer_sta_kickout_event *ev;
5177 	int ret;
5178 
5179 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5180 	if (IS_ERR(tb)) {
5181 		ret = PTR_ERR(tb);
5182 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5183 		return ret;
5184 	}
5185 
5186 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5187 	if (!ev) {
5188 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5189 		kfree(tb);
5190 		return -EPROTO;
5191 	}
5192 
5193 	arg->mac_addr = ev->peer_macaddr.addr;
5194 
5195 	kfree(tb);
5196 	return 0;
5197 }
5198 
5199 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5200 			       struct wmi_roam_event *roam_ev)
5201 {
5202 	const void **tb;
5203 	const struct wmi_roam_event *ev;
5204 	int ret;
5205 
5206 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5207 	if (IS_ERR(tb)) {
5208 		ret = PTR_ERR(tb);
5209 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5210 		return ret;
5211 	}
5212 
5213 	ev = tb[WMI_TAG_ROAM_EVENT];
5214 	if (!ev) {
5215 		ath12k_warn(ab, "failed to fetch roam ev");
5216 		kfree(tb);
5217 		return -EPROTO;
5218 	}
5219 
5220 	roam_ev->vdev_id = ev->vdev_id;
5221 	roam_ev->reason = ev->reason;
5222 	roam_ev->rssi = ev->rssi;
5223 
5224 	kfree(tb);
5225 	return 0;
5226 }
5227 
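/* Map a channel center frequency to a flat index across all bands
 * advertised to mac80211; used to index ar->survey[]. If the frequency
 * is not found, the running index (the total number of channels walked)
 * is returned, so callers must bounds-check the result.
 */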
5228 static int freq_to_idx(struct ath12k *ar, int freq)
5229 {
5230 	struct ieee80211_supported_band *sband;
5231 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5232 	int band, ch, idx = 0;
5233 
5234 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5235 		if (!ar->mac.sbands[band].channels)
5236 			continue;
5237 
5238 		sband = hw->wiphy->bands[band];
5239 		if (!sband)
5240 			continue;
5241 
5242 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5243 			if (sband->channels[ch].center_freq == freq)
5244 				goto exit;
5245 	}
5246 
5247 exit:
5248 	return idx;
5249 }
5250 
5251 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5252 				    struct wmi_chan_info_event *ch_info_ev)
5253 {
5254 	const void **tb;
5255 	const struct wmi_chan_info_event *ev;
5256 	int ret;
5257 
5258 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5259 	if (IS_ERR(tb)) {
5260 		ret = PTR_ERR(tb);
5261 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5262 		return ret;
5263 	}
5264 
5265 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5266 	if (!ev) {
5267 		ath12k_warn(ab, "failed to fetch chan info ev");
5268 		kfree(tb);
5269 		return -EPROTO;
5270 	}
5271 
5272 	ch_info_ev->err_code = ev->err_code;
5273 	ch_info_ev->freq = ev->freq;
5274 	ch_info_ev->cmd_flags = ev->cmd_flags;
5275 	ch_info_ev->noise_floor = ev->noise_floor;
5276 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5277 	ch_info_ev->cycle_count = ev->cycle_count;
5278 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5279 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5280 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5281 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5282 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5283 	ch_info_ev->vdev_id = ev->vdev_id;
5284 
5285 	kfree(tb);
5286 	return 0;
5287 }
5288 
5289 static int
5290 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5291 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5292 {
5293 	const void **tb;
5294 	const struct wmi_pdev_bss_chan_info_event *ev;
5295 	int ret;
5296 
5297 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5298 	if (IS_ERR(tb)) {
5299 		ret = PTR_ERR(tb);
5300 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5301 		return ret;
5302 	}
5303 
5304 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5305 	if (!ev) {
5306 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5307 		kfree(tb);
5308 		return -EPROTO;
5309 	}
5310 
5311 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5312 	bss_ch_info_ev->freq = ev->freq;
5313 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5314 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5315 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5316 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5317 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5318 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5319 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5320 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5321 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5322 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5323 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5324 
5325 	kfree(tb);
5326 	return 0;
5327 }
5328 
5329 static int
5330 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5331 				      struct wmi_vdev_install_key_complete_arg *arg)
5332 {
5333 	const void **tb;
5334 	const struct wmi_vdev_install_key_compl_event *ev;
5335 	int ret;
5336 
5337 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5338 	if (IS_ERR(tb)) {
5339 		ret = PTR_ERR(tb);
5340 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5341 		return ret;
5342 	}
5343 
5344 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5345 	if (!ev) {
5346 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5347 		kfree(tb);
5348 		return -EPROTO;
5349 	}
5350 
5351 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
5352 	arg->macaddr = ev->peer_macaddr.addr;
5353 	arg->key_idx = le32_to_cpu(ev->key_idx);
5354 	arg->key_flags = le32_to_cpu(ev->key_flags);
5355 	arg->status = le32_to_cpu(ev->status);
5356 
5357 	kfree(tb);
5358 	return 0;
5359 }
5360 
5361 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5362 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5363 {
5364 	const void **tb;
5365 	const struct wmi_peer_assoc_conf_event *ev;
5366 	int ret;
5367 
5368 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5369 	if (IS_ERR(tb)) {
5370 		ret = PTR_ERR(tb);
5371 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5372 		return ret;
5373 	}
5374 
5375 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5376 	if (!ev) {
5377 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5378 		kfree(tb);
5379 		return -EPROTO;
5380 	}
5381 
5382 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5383 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5384 
5385 	kfree(tb);
5386 	return 0;
5387 }
5388 
5389 static int
5390 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5391 			 struct wmi_pdev_temperature_event *ev)
5392 {
5393 	const void **tb;
	const struct wmi_pdev_temperature_event *_ev;
5394 	int ret;
5395 
5396 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5397 	if (IS_ERR(tb)) {
5398 		ret = PTR_ERR(tb);
5399 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5400 		return ret;
5401 	}
5402 
5403 	_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5404 	if (!_ev) {
5405 		ath12k_warn(ab, "failed to fetch pdev temp ev");
5406 		kfree(tb);
5407 		return -EPROTO;
5408 	}
5409 
	/* Copy the fixed event fields out so the caller's struct is filled */
	ev->temp = _ev->temp;
	ev->pdev_id = _ev->pdev_id;

5410 	kfree(tb);
5411 	return 0;
5412 }
5413 
5414 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5415 {
5416 	/* try to send pending beacons first. they take priority */
5417 	wake_up(&ab->wmi_ab.tx_credits_wq);
5418 }
5419 
5420 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
5421 				       struct sk_buff *skb)
5422 {
5423 	dev_kfree_skb(skb);
5424 }
5425 
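/* "00" (the cfg80211 world regdomain) and "na" (no alpha2 set) are both
 * treated as world/default regulatory domains here.
 */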
5426 static bool ath12k_reg_is_world_alpha(char *alpha)
5427 {
5428 	if (alpha[0] == '0' && alpha[1] == '0')
5429 		return true;
5430 
5431 	if (alpha[0] == 'n' && alpha[1] == 'a')
5432 		return true;
5433 
5434 	return false;
5435 }
5436 
5437 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
5438 {
5439 	struct ath12k_reg_info *reg_info = NULL;
5440 	struct ieee80211_regdomain *regd = NULL;
5441 	bool intersect = false;
5442 	int ret = 0, pdev_idx, i, j;
5443 	struct ath12k *ar;
5444 
5445 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
5446 	if (!reg_info) {
5447 		ret = -ENOMEM;
5448 		goto fallback;
5449 	}
5450 
5451 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
5452 
5453 	if (ret) {
5454 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
5455 		goto fallback;
5456 	}
5457 
5458 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
5459 		/* If setting the requested country fails, fw retains
5460 		 * the current regd. Print a failure message and
5461 		 * return from here.
5462 		 */
5463 		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
5464 		goto mem_free;
5465 	}
5466 
5467 	pdev_idx = reg_info->phy_id;
5468 
5469 	if (pdev_idx >= ab->num_radios) {
5470 		/* Process the event for phy0 only if single_pdev_only
5471 		 * is true. If pdev_idx is valid but not 0, discard the
5472 		 * event. Otherwise, it goes to fallback.
5473 		 */
5474 		if (ab->hw_params->single_pdev_only &&
5475 		    pdev_idx < ab->hw_params->num_rxmda_per_pdev)
5476 			goto mem_free;
5477 		else
5478 			goto fallback;
5479 	}
5480 
5481 	/* Avoid multiple overwrites of the default regd during core
5482 	 * stop-start after mac registration.
5483 	 */
5484 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
5485 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
5486 		    reg_info->alpha2, 2))
5487 		goto mem_free;
5488 
5489 	/* Intersect new rules with the default regd if a new country setting
5490 	 * was requested, i.e. a default regd was already set during init
5491 	 * and the regd coming from this event has valid country info.
5492 	 */
5493 	if (ab->default_regd[pdev_idx] &&
5494 	    !ath12k_reg_is_world_alpha((char *)
5495 		ab->default_regd[pdev_idx]->alpha2) &&
5496 	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
5497 		intersect = true;
5498 
5499 	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
5500 	if (!regd) {
5501 		ath12k_warn(ab, "failed to build regd from reg_info\n");
5502 		goto fallback;
5503 	}
5504 
5505 	spin_lock(&ab->base_lock);
5506 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
5507 		/* Once mac is registered, ar is valid and all CC events from
5508 		 * fw are currently considered to be received due to user
5509 		 * requests.
5510 		 * Free the previously built regd before assigning the newly
5511 		 * generated regd to ar. NULL pointers are handled by
5512 		 * kfree() itself.
5513 		 */
5514 		ar = ab->pdevs[pdev_idx].ar;
5515 		kfree(ab->new_regd[pdev_idx]);
5516 		ab->new_regd[pdev_idx] = regd;
5517 		queue_work(ab->workqueue, &ar->regd_update_work);
5518 	} else {
5519 		/* Multiple events for the same *ar are not expected. But we
5520 		 * can still clear any previously stored default_regd if we
5521 		 * receive this event for the same radio by mistake.
5522 		 * NULL pointers are handled by kfree() itself.
5523 		 */
5524 		kfree(ab->default_regd[pdev_idx]);
5525 		/* This regd would be applied during mac registration */
5526 		ab->default_regd[pdev_idx] = regd;
5527 	}
5528 	ab->dfs_region = reg_info->dfs_region;
5529 	spin_unlock(&ab->base_lock);
5530 
5531 	goto mem_free;
5532 
5533 fallback:
5534 	/* Fall back to the older regd (by sending the previous country
5535 	 * setting again) if fw succeeded but we failed to process it here.
5536 	 * The regdomain should be uniform across driver and fw. Since the
5537 	 * FW has processed the command and sent a success status, we expect
5538 	 * this function to succeed as well. If it doesn't, CTRY needs to be
5539 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
5540 	 */
5541 	/* TODO: This is rare, but still should also be handled */
5542 	WARN_ON(1);
5543 mem_free:
5544 	if (reg_info) {
5545 		kfree(reg_info->reg_rules_2g_ptr);
5546 		kfree(reg_info->reg_rules_5g_ptr);
5547 		if (reg_info->is_ext_reg_event) {
5548 			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
5549 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
5550 
5551 			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
5552 				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
5553 					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
5554 		}
5555 		kfree(reg_info);
5556 	}
5557 	return ret;
5558 }
5559 
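/* TLV walker for WMI_READY_EVENTID: the fixed WMI_TAG_READY_EVENT carries
 * the init status and base MAC address, while an optional
 * WMI_TAG_ARRAY_FIXED_STRUCT supplies one extra MAC address per radio,
 * applied only when every radio can get its own entry.
 */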
5560 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
5561 				const void *ptr, void *data)
5562 {
5563 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
5564 	struct wmi_ready_event fixed_param;
5565 	struct ath12k_wmi_mac_addr_params *addr_list;
5566 	struct ath12k_pdev *pdev;
5567 	u32 num_mac_addr;
5568 	int i;
5569 
5570 	switch (tag) {
5571 	case WMI_TAG_READY_EVENT:
5572 		memset(&fixed_param, 0, sizeof(fixed_param));
5573 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
5574 		       min_t(u16, sizeof(fixed_param), len));
5575 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
5576 		rdy_parse->num_extra_mac_addr =
5577 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
5578 
5579 		ether_addr_copy(ab->mac_addr,
5580 				fixed_param.ready_event_min.mac_addr.addr);
5581 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
5582 		ab->wmi_ready = true;
5583 		break;
5584 	case WMI_TAG_ARRAY_FIXED_STRUCT:
5585 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
5586 		num_mac_addr = rdy_parse->num_extra_mac_addr;
5587 
5588 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
5589 			break;
5590 
5591 		for (i = 0; i < ab->num_radios; i++) {
5592 			pdev = &ab->pdevs[i];
5593 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
5594 		}
5595 		ab->pdevs_macaddr_valid = true;
5596 		break;
5597 	default:
5598 		break;
5599 	}
5600 
5601 	return 0;
5602 }
5603 
5604 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
5605 {
5606 	struct ath12k_wmi_rdy_parse rdy_parse = { };
5607 	int ret;
5608 
5609 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5610 				  ath12k_wmi_rdy_parse, &rdy_parse);
5611 	if (ret) {
5612 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
5613 		return ret;
5614 	}
5615 
5616 	complete(&ab->wmi_ab.unified_ready);
5617 	return 0;
5618 }
5619 
5620 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5621 {
5622 	struct wmi_peer_delete_resp_event peer_del_resp;
5623 	struct ath12k *ar;
5624 
5625 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
5626 		ath12k_warn(ab, "failed to extract peer delete resp");
5627 		return;
5628 	}
5629 
5630 	rcu_read_lock();
5631 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
5632 	if (!ar) {
5633 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
5634 			    peer_del_resp.vdev_id);
5635 		rcu_read_unlock();
5636 		return;
5637 	}
5638 
5639 	complete(&ar->peer_delete_done);
5640 	rcu_read_unlock();
5641 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
5642 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
5643 }
5644 
5645 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
5646 					  struct sk_buff *skb)
5647 {
5648 	struct ath12k *ar;
5649 	u32 vdev_id = 0;
5650 
5651 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
5652 		ath12k_warn(ab, "failed to extract vdev delete resp");
5653 		return;
5654 	}
5655 
5656 	rcu_read_lock();
5657 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5658 	if (!ar) {
5659 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
5660 			    vdev_id);
5661 		rcu_read_unlock();
5662 		return;
5663 	}
5664 
5665 	complete(&ar->vdev_delete_done);
5666 
5667 	rcu_read_unlock();
5668 
5669 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
5670 		   vdev_id);
5671 }
5672 
5673 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
5674 {
5675 	switch (vdev_resp_status) {
5676 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
5677 		return "invalid vdev id";
5678 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
5679 		return "not supported";
5680 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
5681 		return "dfs violation";
5682 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
5683 		return "invalid regdomain";
5684 	default:
5685 		return "unknown";
5686 	}
5687 }
5688 
5689 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5690 {
5691 	struct wmi_vdev_start_resp_event vdev_start_resp;
5692 	struct ath12k *ar;
5693 	u32 status;
5694 
5695 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
5696 		ath12k_warn(ab, "failed to extract vdev start resp");
5697 		return;
5698 	}
5699 
5700 	rcu_read_lock();
5701 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
5702 	if (!ar) {
5703 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
5704 			    vdev_start_resp.vdev_id);
5705 		rcu_read_unlock();
5706 		return;
5707 	}
5708 
5709 	ar->last_wmi_vdev_start_status = 0;
5710 
5711 	status = le32_to_cpu(vdev_start_resp.status);
5712 
5713 	if (WARN_ON_ONCE(status)) {
5714 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
5715 			    status, ath12k_wmi_vdev_resp_print(status));
5716 		ar->last_wmi_vdev_start_status = status;
5717 	}
5718 
5719 	complete(&ar->vdev_setup_done);
5720 
5721 	rcu_read_unlock();
5722 
5723 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
5724 		   vdev_start_resp.vdev_id);
5725 }
5726 
5727 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
5728 {
5729 	u32 vdev_id, tx_status;
5730 
5731 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
5732 		ath12k_warn(ab, "failed to extract bcn tx status");
5733 		return;
5734 	}
5735 }
5736 
5737 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
5738 {
5739 	struct ath12k *ar;
5740 	u32 vdev_id = 0;
5741 
5742 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
5743 		ath12k_warn(ab, "failed to extract vdev stopped event");
5744 		return;
5745 	}
5746 
5747 	rcu_read_lock();
5748 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5749 	if (!ar) {
5750 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
5751 			    vdev_id);
5752 		rcu_read_unlock();
5753 		return;
5754 	}
5755 
5756 	complete(&ar->vdev_setup_done);
5757 
5758 	rcu_read_unlock();
5759 
5760 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
5761 }
5762 
5763 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
5764 {
5765 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
5766 	struct ath12k *ar;
5767 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5768 	struct ieee80211_hdr *hdr;
5769 	u16 fc;
5770 	struct ieee80211_supported_band *sband;
5771 
5772 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
5773 		ath12k_warn(ab, "failed to extract mgmt rx event");
5774 		dev_kfree_skb(skb);
5775 		return;
5776 	}
5777 
5778 	memset(status, 0, sizeof(*status));
5779 
5780 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
5781 		   rx_ev.status);
5782 
5783 	rcu_read_lock();
5784 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
5785 
5786 	if (!ar) {
5787 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
5788 			    rx_ev.pdev_id);
5789 		dev_kfree_skb(skb);
5790 		goto exit;
5791 	}
5792 
5793 	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
5794 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
5795 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
5796 			     WMI_RX_STATUS_ERR_CRC))) {
5797 		dev_kfree_skb(skb);
5798 		goto exit;
5799 	}
5800 
5801 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
5802 		status->flag |= RX_FLAG_MMIC_ERROR;
5803 
5804 	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
5805 		status->band = NL80211_BAND_6GHZ;
5806 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
5807 		status->band = NL80211_BAND_2GHZ;
5808 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
5809 		status->band = NL80211_BAND_5GHZ;
5810 	} else {
5811 		/* Shouldn't happen unless list of advertised channels to
5812 		 * mac80211 has been changed.
5813 		 */
5814 		WARN_ON_ONCE(1);
5815 		dev_kfree_skb(skb);
5816 		goto exit;
5817 	}
5818 
5819 	if (rx_ev.phy_mode == MODE_11B &&
5820 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
5821 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5822 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
5823 
5824 	sband = &ar->mac.sbands[status->band];
5825 
5826 	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
5827 						      status->band);
5828 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
5829 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
5830 
5831 	hdr = (struct ieee80211_hdr *)skb->data;
5832 	fc = le16_to_cpu(hdr->frame_control);
5833 
5834 	/* Firmware is guaranteed to report all essential management frames
5835 	 * via WMI while it can deliver some extra via HTT. Since there can
5836 	 * be duplicates, split the reporting wrt monitor/sniffing.
5837 	 */
5838 	status->flag |= RX_FLAG_SKIP_MONITOR;
5839 
5840 	/* In case of PMF, FW delivers decrypted frames with the Protected
5841 	 * Bit set, including group privacy action frames.
5842 	 */
5843 	if (ieee80211_has_protected(hdr->frame_control)) {
5844 		status->flag |= RX_FLAG_DECRYPTED;
5845 
5846 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
5847 			status->flag |= RX_FLAG_IV_STRIPPED |
5848 					RX_FLAG_MMIC_STRIPPED;
5849 			hdr->frame_control = __cpu_to_le16(fc &
5850 					     ~IEEE80211_FCTL_PROTECTED);
5851 		}
5852 	}
5853 
5854 	/* TODO: Pending handle beacon implementation
5855 	 *if (ieee80211_is_beacon(hdr->frame_control))
5856 	 *	ath12k_mac_handle_beacon(ar, skb);
5857 	 */
5858 
5859 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5860 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
5861 		   skb, skb->len,
5862 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
5863 
5864 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5865 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
5866 		   status->freq, status->band, status->signal,
5867 		   status->rate_idx);
5868 
5869 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
5870 
5871 exit:
5872 	rcu_read_unlock();
5873 }
5874 
5875 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
5876 {
5877 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
5878 	struct ath12k *ar;
5879 
5880 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
5881 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
5882 		return;
5883 	}
5884 
5885 	rcu_read_lock();
5886 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
5887 	if (!ar) {
5888 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
5889 			    tx_compl_param.pdev_id);
5890 		goto exit;
5891 	}
5892 
5893 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
5894 				 le32_to_cpu(tx_compl_param.status));
5895 
5896 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5897 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
5898 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
5899 		   tx_compl_param.status);
5900 
5901 exit:
5902 	rcu_read_unlock();
5903 }
5904 
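/* Find the radio whose scan on the given vdev is in @state. The caller
 * must hold rcu_read_lock() since ab->pdevs_active is RCU-protected.
 */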
5905 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
5906 						  u32 vdev_id,
5907 						  enum ath12k_scan_state state)
5908 {
5909 	int i;
5910 	struct ath12k_pdev *pdev;
5911 	struct ath12k *ar;
5912 
5913 	for (i = 0; i < ab->num_radios; i++) {
5914 		pdev = rcu_dereference(ab->pdevs_active[i]);
5915 		if (pdev && pdev->ar) {
5916 			ar = pdev->ar;
5917 
5918 			spin_lock_bh(&ar->data_lock);
5919 			if (ar->scan.state == state &&
5920 			    ar->scan.vdev_id == vdev_id) {
5921 				spin_unlock_bh(&ar->data_lock);
5922 				return ar;
5923 			}
5924 			spin_unlock_bh(&ar->data_lock);
5925 		}
5926 	}
5927 	return NULL;
5928 }
5929 
5930 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
5931 {
5932 	struct ath12k *ar;
5933 	struct wmi_scan_event scan_ev = {0};
5934 
5935 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
5936 		ath12k_warn(ab, "failed to extract scan event");
5937 		return;
5938 	}
5939 
5940 	rcu_read_lock();
5941 
5942 	/* In case the scan was cancelled, e.g. during interface teardown,
5943 	 * the interface will not be found among the active interfaces.
5944 	 * Rather, in such scenarios, iterate over the active pdevs to
5945 	 * find the 'ar' whose scan is ABORTING and whose aborting scan's
5946 	 * vdev id matches this event info.
5947 	 */
5948 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
5949 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
5950 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
5951 						 ATH12K_SCAN_ABORTING);
5952 		if (!ar)
5953 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
5954 							 ATH12K_SCAN_RUNNING);
5955 	} else {
5956 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
5957 	}
5958 
5959 	if (!ar) {
5960 		ath12k_warn(ab, "Received scan event for unknown vdev");
5961 		rcu_read_unlock();
5962 		return;
5963 	}
5964 
5965 	spin_lock_bh(&ar->data_lock);
5966 
5967 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5968 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
5969 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
5970 						  le32_to_cpu(scan_ev.reason)),
5971 		   le32_to_cpu(scan_ev.event_type),
5972 		   le32_to_cpu(scan_ev.reason),
5973 		   le32_to_cpu(scan_ev.channel_freq),
5974 		   le32_to_cpu(scan_ev.scan_req_id),
5975 		   le32_to_cpu(scan_ev.scan_id),
5976 		   le32_to_cpu(scan_ev.vdev_id),
5977 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
5978 
5979 	switch (le32_to_cpu(scan_ev.event_type)) {
5980 	case WMI_SCAN_EVENT_STARTED:
5981 		ath12k_wmi_event_scan_started(ar);
5982 		break;
5983 	case WMI_SCAN_EVENT_COMPLETED:
5984 		ath12k_wmi_event_scan_completed(ar);
5985 		break;
5986 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5987 		ath12k_wmi_event_scan_bss_chan(ar);
5988 		break;
5989 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5990 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
5991 		break;
5992 	case WMI_SCAN_EVENT_START_FAILED:
5993 		ath12k_warn(ab, "received scan start failure event\n");
5994 		ath12k_wmi_event_scan_start_failed(ar);
5995 		break;
5996 	case WMI_SCAN_EVENT_DEQUEUED:
5997 		__ath12k_mac_scan_finish(ar);
5998 		break;
5999 	case WMI_SCAN_EVENT_PREEMPTED:
6000 	case WMI_SCAN_EVENT_RESTARTED:
6001 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6002 	default:
6003 		break;
6004 	}
6005 
6006 	spin_unlock_bh(&ar->data_lock);
6007 
6008 	rcu_read_unlock();
6009 }
6010 
6011 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6012 {
6013 	struct wmi_peer_sta_kickout_arg arg = {};
6014 	struct ieee80211_sta *sta;
6015 	struct ath12k_peer *peer;
6016 	struct ath12k *ar;
6017 
6018 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6019 		ath12k_warn(ab, "failed to extract peer sta kickout event");
6020 		return;
6021 	}
6022 
6023 	rcu_read_lock();
6024 
6025 	spin_lock_bh(&ab->base_lock);
6026 
6027 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6028 
6029 	if (!peer) {
6030 		ath12k_warn(ab, "peer not found %pM\n",
6031 			    arg.mac_addr);
6032 		goto exit;
6033 	}
6034 
6035 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6036 	if (!ar) {
6037 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6038 			    peer->vdev_id);
6039 		goto exit;
6040 	}
6041 
6042 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6043 					   arg.mac_addr, NULL);
6044 	if (!sta) {
6045 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6046 			    arg.mac_addr);
6047 		goto exit;
6048 	}
6049 
6050 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6051 		   arg.mac_addr);
6052 
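	/* Nudge mac80211's low-ack path; the packet count here is a nominal
	 * driver-chosen value, not a measured number of lost frames.
	 */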
6053 	ieee80211_report_low_ack(sta, 10);
6054 
6055 exit:
6056 	spin_unlock_bh(&ab->base_lock);
6057 	rcu_read_unlock();
6058 }
6059 
6060 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6061 {
6062 	struct wmi_roam_event roam_ev = {};
6063 	struct ath12k *ar;
6064 
6065 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6066 		ath12k_warn(ab, "failed to extract roam event");
6067 		return;
6068 	}
6069 
6070 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6071 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
6072 		   roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
6073 
6074 	rcu_read_lock();
6075 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
6076 	if (!ar) {
6077 		ath12k_warn(ab, "invalid vdev id in roam ev %d",
6078 			    roam_ev.vdev_id);
6079 		rcu_read_unlock();
6080 		return;
6081 	}
6082 
6083 	if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
6084 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6085 			    roam_ev.reason, roam_ev.vdev_id);
6086 
6087 	switch (le32_to_cpu(roam_ev.reason)) {
6088 	case WMI_ROAM_REASON_BEACON_MISS:
6089 		/* TODO: Pending beacon miss and connection_loss_work
6090 		 * implementation
6091 		 * ath12k_mac_handle_beacon_miss(ar, vdev_id);
6092 		 */
6093 		break;
6094 	case WMI_ROAM_REASON_BETTER_AP:
6095 	case WMI_ROAM_REASON_LOW_RSSI:
6096 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6097 	case WMI_ROAM_REASON_HO_FAILED:
6098 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6099 			    roam_ev.reason, roam_ev.vdev_id);
6100 		break;
6101 	}
6102 
6103 	rcu_read_unlock();
6104 }
6105 
6106 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6107 {
6108 	struct wmi_chan_info_event ch_info_ev = {0};
6109 	struct ath12k *ar;
6110 	struct survey_info *survey;
6111 	int idx;
6112 	/* HW channel counter frequency in hertz */
6113 	u32 cc_freq_hz = ab->cc_freq_hz;
6114 
6115 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6116 		ath12k_warn(ab, "failed to extract chan info event");
6117 		return;
6118 	}
6119 
6120 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6121 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6122 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6123 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6124 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6125 		   ch_info_ev.mac_clk_mhz);
6126 
6127 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6128 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6129 		return;
6130 	}
6131 
6132 	rcu_read_lock();
6133 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6134 	if (!ar) {
6135 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6136 			    ch_info_ev.vdev_id);
6137 		rcu_read_unlock();
6138 		return;
6139 	}
6140 	spin_lock_bh(&ar->data_lock);
6141 
6142 	switch (ar->scan.state) {
6143 	case ATH12K_SCAN_IDLE:
6144 	case ATH12K_SCAN_STARTING:
6145 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6146 		goto exit;
6147 	case ATH12K_SCAN_RUNNING:
6148 	case ATH12K_SCAN_ABORTING:
6149 		break;
6150 	}
6151 
6152 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6153 	if (idx >= ARRAY_SIZE(ar->survey)) {
6154 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6155 			    ch_info_ev.freq, idx);
6156 		goto exit;
6157 	}
6158 
6159 	/* If FW provides the MAC clock frequency in MHz, override the
6160 	 * initialized HW channel counter frequency value.
6161 	 */
6162 	if (ch_info_ev.mac_clk_mhz)
6163 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6164 
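	/* A START_RESP marks the beginning of stats for this channel: reset
	 * the survey entry and seed it from the cycle counters, scaled by
	 * the counter clock frequency.
	 */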
6165 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
6166 		survey = &ar->survey[idx];
6167 		memset(survey, 0, sizeof(*survey));
6168 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6169 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6170 				 SURVEY_INFO_TIME_BUSY;
6171 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6172 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6173 					    cc_freq_hz);
6174 	}
6175 exit:
6176 	spin_unlock_bh(&ar->data_lock);
6177 	rcu_read_unlock();
6178 }
6179 
6180 static void
6181 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6182 {
6183 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6184 	struct survey_info *survey;
6185 	struct ath12k *ar;
6186 	u32 cc_freq_hz = ab->cc_freq_hz;
6187 	u64 busy, total, tx, rx, rx_bss;
6188 	int idx;
6189 
6190 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6191 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6192 		return;
6193 	}
6194 
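	/* Reassemble the 64-bit cycle counters from the high/low 32-bit
	 * words reported by firmware.
	 */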
6195 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6196 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6197 
6198 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6199 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6200 
6201 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6202 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6203 
6204 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6205 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6206 
6207 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6208 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6209 
6210 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6211 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6212 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6213 		   bss_ch_info_ev.noise_floor, busy, total,
6214 		   tx, rx, rx_bss);
6215 
6216 	rcu_read_lock();
6217 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6218 
6219 	if (!ar) {
6220 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6221 			    bss_ch_info_ev.pdev_id);
6222 		rcu_read_unlock();
6223 		return;
6224 	}
6225 
6226 	spin_lock_bh(&ar->data_lock);
6227 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6228 	if (idx >= ARRAY_SIZE(ar->survey)) {
6229 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6230 			    bss_ch_info_ev.freq, idx);
6231 		goto exit;
6232 	}
6233 
6234 	survey = &ar->survey[idx];
6235 
6236 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6237 	survey->time      = div_u64(total, cc_freq_hz);
6238 	survey->time_busy = div_u64(busy, cc_freq_hz);
6239 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6240 	survey->time_tx   = div_u64(tx, cc_freq_hz);
6241 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6242 			     SURVEY_INFO_TIME |
6243 			     SURVEY_INFO_TIME_BUSY |
6244 			     SURVEY_INFO_TIME_RX |
6245 			     SURVEY_INFO_TIME_TX);
6246 exit:
6247 	spin_unlock_bh(&ar->data_lock);
6248 	complete(&ar->bss_survey_done);
6249 
6250 	rcu_read_unlock();
6251 }
6252 
6253 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6254 						struct sk_buff *skb)
6255 {
6256 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6257 	struct ath12k *ar;
6258 
6259 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6260 		ath12k_warn(ab, "failed to extract install key compl event");
6261 		return;
6262 	}
6263 
6264 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6265 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6266 		   install_key_compl.key_idx, install_key_compl.key_flags,
6267 		   install_key_compl.macaddr, install_key_compl.status);
6268 
6269 	rcu_read_lock();
6270 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6271 	if (!ar) {
6272 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6273 			    install_key_compl.vdev_id);
6274 		rcu_read_unlock();
6275 		return;
6276 	}
6277 
6278 	ar->install_key_status = 0;
6279 
6280 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6281 		ath12k_warn(ab, "install key failed for %pM status %d\n",
6282 			    install_key_compl.macaddr, install_key_compl.status);
6283 		ar->install_key_status = install_key_compl.status;
6284 	}
6285 
6286 	complete(&ar->install_key_done);
6287 	rcu_read_unlock();
6288 }
6289 
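/* Unpack the service-available bitmaps. Each 32-bit word carries
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 flags, so bit (j % 32) of word i maps
 * to global service id j = base + i * 32 + bit, where base is
 * WMI_MAX_SERVICE for the ext segment and WMI_MAX_EXT_SERVICE for the
 * ext2 segment (delivered as a plain uint32 array).
 */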
6290 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6291 					  u16 tag, u16 len,
6292 					  const void *ptr,
6293 					  void *data)
6294 {
6295 	const struct wmi_service_available_event *ev;
6296 	u32 *wmi_ext2_service_bitmap;
6297 	int i, j;
6298 	u16 expected_len;
6299 
6300 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6301 	if (len < expected_len) {
6302 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
6303 			    len, tag);
6304 		return -EINVAL;
6305 	}
6306 
6307 	switch (tag) {
6308 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
6309 		ev = (struct wmi_service_available_event *)ptr;
6310 		for (i = 0, j = WMI_MAX_SERVICE;
6311 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6312 		     i++) {
6313 			do {
6314 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6315 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6316 					set_bit(j, ab->wmi_ab.svc_map);
6317 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6318 		}
6319 
6320 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6321 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
6322 			   ev->wmi_service_segment_bitmap[0],
6323 			   ev->wmi_service_segment_bitmap[1],
6324 			   ev->wmi_service_segment_bitmap[2],
6325 			   ev->wmi_service_segment_bitmap[3]);
6326 		break;
6327 	case WMI_TAG_ARRAY_UINT32:
6328 		wmi_ext2_service_bitmap = (u32 *)ptr;
6329 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
6330 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
6331 		     i++) {
6332 			do {
6333 				if (wmi_ext2_service_bitmap[i] &
6334 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6335 					set_bit(j, ab->wmi_ab.svc_map);
6336 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6337 		}
6338 
6339 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6340 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
6341 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
6342 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
6343 		break;
6344 	}
6345 	return 0;
6346 }
6347 
6348 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
6349 {
6350 	int ret;
6351 
6352 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6353 				  ath12k_wmi_tlv_services_parser,
6354 				  NULL);
6355 	return ret;
6356 }
6357 
6358 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6359 {
6360 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6361 	struct ath12k *ar;
6362 
6363 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6364 		ath12k_warn(ab, "failed to extract peer assoc conf event");
6365 		return;
6366 	}
6367 
6368 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6369 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
6370 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6371 
6372 	rcu_read_lock();
6373 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6374 
6375 	if (!ar) {
6376 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6377 			    peer_assoc_conf.vdev_id);
6378 		rcu_read_unlock();
6379 		return;
6380 	}
6381 
6382 	complete(&ar->peer_assoc_done);
6383 	rcu_read_unlock();
6384 }
6385 
6386 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
6387 {
6388 }
6389 
6390 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
6391  * is not part of BDF CTL(Conformance test limits) table entries.
6392  */
6393 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
6394 						 struct sk_buff *skb)
6395 {
6396 	const void **tb;
6397 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
6398 	int ret;
6399 
6400 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6401 	if (IS_ERR(tb)) {
6402 		ret = PTR_ERR(tb);
6403 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6404 		return;
6405 	}
6406 
6407 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
6408 	if (!ev) {
6409 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
6410 		kfree(tb);
6411 		return;
6412 	}
6413 
6414 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6415 		   "pdev ctl failsafe check ev status %d\n",
6416 		   ev->ctl_failsafe_status);
6417 
6418 	/* If ctl_failsafe_status is set to 1, FW caps the transmit power
6419 	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
6420 	 */
6421 	if (ev->ctl_failsafe_status != 0)
6422 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
6423 			    ev->ctl_failsafe_status);
6424 
6425 	kfree(tb);
6426 }
6427 
6428 static void
6429 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
6430 					  const struct ath12k_wmi_pdev_csa_event *ev,
6431 					  const u32 *vdev_ids)
6432 {
6433 	int i;
6434 	struct ath12k_vif *arvif;
6435 
6436 	/* Finish CSA once the switch count reaches zero */
6437 	if (ev->current_switch_count)
6438 		return;
6439 
6440 	rcu_read_lock();
6441 	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
6442 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
6443 
6444 		if (!arvif) {
6445 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
6446 				    vdev_ids[i]);
6447 			continue;
6448 		}
6449 
6450 		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
6451 			ieee80211_csa_finish(arvif->vif, 0);
6452 	}
6453 	rcu_read_unlock();
6454 }
6455 
6456 static void
6457 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
6458 					      struct sk_buff *skb)
6459 {
6460 	const void **tb;
6461 	const struct ath12k_wmi_pdev_csa_event *ev;
6462 	const u32 *vdev_ids;
6463 	int ret;
6464 
6465 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6466 	if (IS_ERR(tb)) {
6467 		ret = PTR_ERR(tb);
6468 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6469 		return;
6470 	}
6471 
6472 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
6473 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
6474 
6475 	if (!ev || !vdev_ids) {
6476 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
6477 		kfree(tb);
6478 		return;
6479 	}
6480 
6481 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6482 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
6483 		   ev->current_switch_count, ev->pdev_id,
6484 		   ev->num_vdevs);
6485 
6486 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
6487 
6488 	kfree(tb);
6489 }
6490 
6491 static void
6492 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
6493 {
6494 	const void **tb;
6495 	const struct ath12k_wmi_pdev_radar_event *ev;
6496 	struct ath12k *ar;
6497 	int ret;
6498 
6499 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6500 	if (IS_ERR(tb)) {
6501 		ret = PTR_ERR(tb);
6502 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6503 		return;
6504 	}
6505 
6506 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
6507 
6508 	if (!ev) {
6509 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
6510 		kfree(tb);
6511 		return;
6512 	}
6513 
6514 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6515 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
6516 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
6517 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
6518 		   ev->freq_offset, ev->sidx);
6519 
6520 	rcu_read_lock();
6521 
6522 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
6523 
6524 	if (!ar) {
6525 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
6526 			    ev->pdev_id);
6527 		goto exit;
6528 	}
6529 
6530 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
6531 		   ev->pdev_id);
6532 
6533 	if (ar->dfs_block_radar_events)
6534 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
6535 	else
6536 		ieee80211_radar_detected(ath12k_ar_to_hw(ar));
6537 
6538 exit:
6539 	rcu_read_unlock();
6540 
6541 	kfree(tb);
6542 }
6543 
6544 static void
6545 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
6546 				  struct sk_buff *skb)
6547 {
6548 	struct ath12k *ar;
6549 	struct wmi_pdev_temperature_event ev = {0};
6550 
6551 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
6552 		ath12k_warn(ab, "failed to extract pdev temperature event");
6553 		return;
6554 	}
6555 
6556 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6557 		   "pdev temperature ev temp %d pdev_id %d\n",
		   a_sle32_to_cpu(ev.temp), le32_to_cpu(ev.pdev_id));
6558 
6559 	rcu_read_lock();
6560 
6561 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
6562 	if (!ar) {
6563 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
6564 		goto exit;
6565 	}
6566 
6567 exit:
6568 	rcu_read_unlock();
6569 }
6570 
6571 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
6572 					struct sk_buff *skb)
6573 {
6574 	const void **tb;
6575 	const struct wmi_fils_discovery_event *ev;
6576 	int ret;
6577 
6578 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6579 	if (IS_ERR(tb)) {
6580 		ret = PTR_ERR(tb);
6581 		ath12k_warn(ab,
6582 			    "failed to parse FILS discovery event tlv %d\n",
6583 			    ret);
6584 		return;
6585 	}
6586 
6587 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
6588 	if (!ev) {
6589 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
6590 		kfree(tb);
6591 		return;
6592 	}
6593 
6594 	ath12k_warn(ab,
6595 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
6596 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
6597 
6598 	kfree(tb);
6599 }
6600 
6601 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
6602 					      struct sk_buff *skb)
6603 {
6604 	const void **tb;
6605 	const struct wmi_probe_resp_tx_status_event *ev;
6606 	int ret;
6607 
6608 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6609 	if (IS_ERR(tb)) {
6610 		ret = PTR_ERR(tb);
6611 		ath12k_warn(ab,
6612 			    "failed to parse probe response transmission status event tlv: %d\n",
6613 			    ret);
6614 		return;
6615 	}
6616 
6617 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
6618 	if (!ev) {
6619 		ath12k_warn(ab,
6620 			    "failed to fetch probe response transmission status event");
6621 		kfree(tb);
6622 		return;
6623 	}
6624 
6625 	if (ev->tx_status)
6626 		ath12k_warn(ab,
6627 			    "Probe response transmission failed for vdev_id %u, status %u\n",
6628 			    ev->vdev_id, ev->tx_status);
6629 
6630 	kfree(tb);
6631 }
6632 
6633 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
6634 					     struct sk_buff *skb)
6635 {
6636 	const struct wmi_rfkill_state_change_event *ev;
6637 	const void **tb;
6638 	int ret;
6639 
6640 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6641 	if (IS_ERR(tb)) {
6642 		ret = PTR_ERR(tb);
6643 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6644 		return;
6645 	}
6646 
6647 	ev = tb[WMI_TAG_RFKILL_EVENT];
6648 	if (!ev) {
6649 		kfree(tb);
6650 		return;
6651 	}
6652 
6653 	ath12k_dbg(ab, ATH12K_DBG_MAC,
6654 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
6655 		   le32_to_cpu(ev->gpio_pin_num),
6656 		   le32_to_cpu(ev->int_type),
6657 		   le32_to_cpu(ev->radio_state));
6658 
6659 	spin_lock_bh(&ab->base_lock);
6660 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
6661 	spin_unlock_bh(&ab->base_lock);
6662 
6663 	queue_work(ab->workqueue, &ab->rfkill_work);
6664 	kfree(tb);
6665 }
6666 
6667 static void
6668 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
6669 {
6670 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
6671 }
6672 
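/* WMI event dispatcher: strip the command header and route the event by
 * id. Handlers consume the payload in place, except mgmt rx which takes
 * ownership of the skb and therefore returns early.
 */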
6673 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
6674 {
6675 	struct wmi_cmd_hdr *cmd_hdr;
6676 	enum wmi_tlv_event_id id;
6677 
6678 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6679 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
6680 
6681 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6682 		goto out;
6683 
6684 	switch (id) {
6685 		/* Process all the WMI events here */
6686 	case WMI_SERVICE_READY_EVENTID:
6687 		ath12k_service_ready_event(ab, skb);
6688 		break;
6689 	case WMI_SERVICE_READY_EXT_EVENTID:
6690 		ath12k_service_ready_ext_event(ab, skb);
6691 		break;
6692 	case WMI_SERVICE_READY_EXT2_EVENTID:
6693 		ath12k_service_ready_ext2_event(ab, skb);
6694 		break;
6695 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
6696 		ath12k_reg_chan_list_event(ab, skb);
6697 		break;
6698 	case WMI_READY_EVENTID:
6699 		ath12k_ready_event(ab, skb);
6700 		break;
6701 	case WMI_PEER_DELETE_RESP_EVENTID:
6702 		ath12k_peer_delete_resp_event(ab, skb);
6703 		break;
6704 	case WMI_VDEV_START_RESP_EVENTID:
6705 		ath12k_vdev_start_resp_event(ab, skb);
6706 		break;
6707 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
6708 		ath12k_bcn_tx_status_event(ab, skb);
6709 		break;
6710 	case WMI_VDEV_STOPPED_EVENTID:
6711 		ath12k_vdev_stopped_event(ab, skb);
6712 		break;
6713 	case WMI_MGMT_RX_EVENTID:
6714 		ath12k_mgmt_rx_event(ab, skb);
6715 		/* mgmt_rx_event() owns the skb now! */
6716 		return;
6717 	case WMI_MGMT_TX_COMPLETION_EVENTID:
6718 		ath12k_mgmt_tx_compl_event(ab, skb);
6719 		break;
6720 	case WMI_SCAN_EVENTID:
6721 		ath12k_scan_event(ab, skb);
6722 		break;
6723 	case WMI_PEER_STA_KICKOUT_EVENTID:
6724 		ath12k_peer_sta_kickout_event(ab, skb);
6725 		break;
6726 	case WMI_ROAM_EVENTID:
6727 		ath12k_roam_event(ab, skb);
6728 		break;
6729 	case WMI_CHAN_INFO_EVENTID:
6730 		ath12k_chan_info_event(ab, skb);
6731 		break;
6732 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
6733 		ath12k_pdev_bss_chan_info_event(ab, skb);
6734 		break;
6735 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
6736 		ath12k_vdev_install_key_compl_event(ab, skb);
6737 		break;
6738 	case WMI_SERVICE_AVAILABLE_EVENTID:
6739 		ath12k_service_available_event(ab, skb);
6740 		break;
6741 	case WMI_PEER_ASSOC_CONF_EVENTID:
6742 		ath12k_peer_assoc_conf_event(ab, skb);
6743 		break;
6744 	case WMI_UPDATE_STATS_EVENTID:
6745 		ath12k_update_stats_event(ab, skb);
6746 		break;
6747 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
6748 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
6749 		break;
6750 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
6751 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
6752 		break;
6753 	case WMI_PDEV_TEMPERATURE_EVENTID:
6754 		ath12k_wmi_pdev_temperature_event(ab, skb);
6755 		break;
6756 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
6757 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
6758 		break;
6759 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
6760 		ath12k_fils_discovery_event(ab, skb);
6761 		break;
6762 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
6763 		ath12k_probe_resp_tx_status_event(ab, skb);
6764 		break;
6765 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
6766 		ath12k_rfkill_state_change_event(ab, skb);
6767 		break;
6768 	/* Add unsupported events here */
6769 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
6770 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
6771 	case WMI_TWT_ENABLE_EVENTID:
6772 	case WMI_TWT_DISABLE_EVENTID:
6773 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
6774 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6775 			   "ignoring unsupported event 0x%x\n", id);
6776 		break;
6777 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
6778 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
6779 		break;
6780 	case WMI_VDEV_DELETE_RESP_EVENTID:
6781 		ath12k_vdev_delete_resp_event(ab, skb);
6782 		break;
6783 	case WMI_DIAG_EVENTID:
6784 		ath12k_wmi_diag_event(ab, skb);
6785 		break;
6786 	/* TODO: Add remaining events */
6787 	default:
6788 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
6789 		break;
6790 	}
6791 
6792 out:
6793 	dev_kfree_skb(skb);
6794 }
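
/* A minimal sketch of the pattern a new TLV event handler follows, using
 * hypothetical names (WMI_TAG_FOO_EVENT, struct wmi_foo_event) purely for
 * illustration:
 *
 *	static void ath12k_foo_event(struct ath12k_base *ab,
 *				     struct sk_buff *skb)
 *	{
 *		const void **tb;
 *		const struct wmi_foo_event *ev;
 *
 *		tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *		if (IS_ERR(tb)) {
 *			ath12k_warn(ab, "failed to parse foo event tlv: %ld\n",
 *				    PTR_ERR(tb));
 *			return;
 *		}
 *
 *		ev = tb[WMI_TAG_FOO_EVENT];
 *		if (ev)
 *			ath12k_dbg(ab, ATH12K_DBG_WMI, "foo event vdev %u\n",
 *				   le32_to_cpu(ev->vdev_id));
 *		kfree(tb);
 *	}
 *
 * Such a handler is then dispatched from the switch above under its event
 * id; the dispatcher still owns and frees the skb unless the handler
 * explicitly keeps it, as ath12k_mgmt_rx_event() does.
 */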
6795 
6796 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
6797 					   u32 pdev_idx)
6798 {
6799 	int status;
6800 	u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
6801 			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
6802 			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
6803 	struct ath12k_htc_svc_conn_req conn_req = {};
6804 	struct ath12k_htc_svc_conn_resp conn_resp = {};
6805 
6806 	/* these fields are the same for all service endpoints */
6807 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
6808 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
6809 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
6810 
6811 	/* connect to control service */
6812 	conn_req.service_id = svc_id[pdev_idx];
6813 
6814 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
6815 	if (status) {
6816 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
6817 			    status);
6818 		return status;
6819 	}
6820 
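	/* Record the endpoint HTC assigned to this pdev's WMI control
	 * service, both in the per-pdev handle and in the wmi_ab lookup
	 * table, along with the largest message the endpoint accepts.
	 */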
6821 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
6822 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
6823 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
6824 
6825 	return 0;
6826 }
6827 
6828 static int
6829 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
6830 			      struct wmi_unit_test_cmd ut_cmd,
6831 			      u32 *test_args)
6832 {
6833 	struct ath12k_wmi_pdev *wmi = ar->wmi;
6834 	struct wmi_unit_test_cmd *cmd;
6835 	struct sk_buff *skb;
6836 	struct wmi_tlv *tlv;
6837 	void *ptr;
6838 	u32 *ut_cmd_args;
6839 	int buf_len, arg_len;
6840 	int ret;
6841 	int i;
6842 
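	/* On the wire this command is the fixed-parameter TLV followed by a
	 * WMI_TAG_ARRAY_UINT32 TLV holding num_args u32 values, hence
	 * buf_len below is sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE.
	 */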
6843 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
6844 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
6845 
6846 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
6847 	if (!skb)
6848 		return -ENOMEM;
6849 
6850 	cmd = (struct wmi_unit_test_cmd *)skb->data;
6851 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
6852 						 sizeof(ut_cmd));
6853 
6854 	cmd->vdev_id = ut_cmd.vdev_id;
6855 	cmd->module_id = ut_cmd.module_id;
6856 	cmd->num_args = ut_cmd.num_args;
6857 	cmd->diag_token = ut_cmd.diag_token;
6858 
6859 	ptr = skb->data + sizeof(ut_cmd);
6860 
6861 	tlv = ptr;
6862 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
6863 
6864 	ptr += TLV_HDR_SIZE;
6865 
6866 	ut_cmd_args = ptr;
6867 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
6868 		ut_cmd_args[i] = test_args[i];
6869 
6870 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
6871 		   "WMI unit test: module %d vdev %d n_args %d token %d\n",
6872 		   le32_to_cpu(cmd->module_id), le32_to_cpu(cmd->vdev_id),
6873 		   le32_to_cpu(cmd->num_args), le32_to_cpu(cmd->diag_token));
6874 
6875 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
6876 
6877 	if (ret) {
6878 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST cmd: %d\n",
6879 			    ret);
6880 		dev_kfree_skb(skb);
6881 	}
6882 
6883 	return ret;
6884 }
6885 
6886 int ath12k_wmi_simulate_radar(struct ath12k *ar)
6887 {
6888 	struct ath12k_vif *arvif;
6889 	u32 dfs_args[DFS_MAX_TEST_ARGS];
6890 	struct wmi_unit_test_cmd wmi_ut;
6891 	bool arvif_found = false;
6892 
6893 	list_for_each_entry(arvif, &ar->arvifs, list) {
6894 		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
6895 			arvif_found = true;
6896 			break;
6897 		}
6898 	}
6899 
6900 	if (!arvif_found)
6901 		return -EINVAL;
6902 
6903 	dfs_args[DFS_TEST_CMDID] = 0;
6904 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
6905 	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
6906 	 * freq offset (b3 - b10) to the unit test. For simulation
6907 	 * purposes this can be set to 0, which is valid.
6908 	 */
6909 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
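	/* For illustration only: a hypothetical non-zero encoding would be
	 * (freq_offset << 3) | (chirp << 2) | segment_id.
	 */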
6910 
6911 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
6912 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
6913 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
6914 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
6915 
6916 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
6917 
6918 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
6919 }
6920 
6921 int ath12k_wmi_connect(struct ath12k_base *ab)
6922 {
6923 	u32 i;
6924 	u8 wmi_ep_count;
6925 
6926 	wmi_ep_count = ab->htc.wmi_ep_count;
6927 	if (wmi_ep_count > ab->hw_params->max_radios)
6928 		return -EINVAL;
6929 
6930 	for (i = 0; i < wmi_ep_count; i++) {
		int status = ath12k_connect_pdev_htc_service(ab, i);

		if (status)
			return status;
	}
6932 
6933 	return 0;
6934 }
6935 
6936 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
6937 {
6938 	if (WARN_ON(pdev_id >= MAX_RADIOS))
6939 		return;
6940 
6941 	/* TODO: Deinit any pdev specific wmi resource */
6942 }
6943 
6944 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
6945 			   u8 pdev_id)
6946 {
6947 	struct ath12k_wmi_pdev *wmi_handle;
6948 
6949 	if (pdev_id >= ab->hw_params->max_radios)
6950 		return -EINVAL;
6951 
6952 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
6953 
6954 	wmi_handle->wmi_ab = &ab->wmi_ab;
6955 
6956 	ab->wmi_ab.ab = ab;
6957 	/* TODO: Init remaining resource specific to pdev */
6958 
6959 	return 0;
6960 }
6961 
6962 int ath12k_wmi_attach(struct ath12k_base *ab)
6963 {
6964 	int ret;
6965 
6966 	ret = ath12k_wmi_pdev_attach(ab, 0);
6967 	if (ret)
6968 		return ret;
6969 
6970 	ab->wmi_ab.ab = ab;
6971 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
6972 
6973 	/* This default is overwritten when the service ready ext event is handled */
6974 	if (ab->hw_params->single_pdev_only)
6975 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
6976 
6977 	/* TODO: Init remaining wmi soc resources required */
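	/* Both completions are signalled from the WMI event path once
	 * firmware reports its services and overall readiness; the boot
	 * sequence waits on them before proceeding.
	 */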
6978 	init_completion(&ab->wmi_ab.service_ready);
6979 	init_completion(&ab->wmi_ab.unified_ready);
6980 
6981 	return 0;
6982 }
6983 
6984 void ath12k_wmi_detach(struct ath12k_base *ab)
6985 {
6986 	int i;
6987 
6988 	/* TODO: Deinit wmi resource specific to SOC as required */
6989 
6990 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
6991 		ath12k_wmi_pdev_detach(ab, i);
6992 
6993 	ath12k_wmi_free_dbring_caps(ab);
6994 }
6995