// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

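/* Per-tag minimum payload lengths. ath12k_wmi_tlv_iter() rejects any TLV
 * whose advertised length is shorter than the policy entry for its tag,
 * so the event handlers below can dereference these fixed-size structs
 * without further bounds checks.
 */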
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
};

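/* A TLV header is a single little-endian u32 carrying the tag and the
 * payload length in the WMI_TLV_TAG/WMI_TLV_LEN bit fields (see wmi.h).
 * For example (illustrative), ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 16)
 * builds the header for a 16-byte byte-array TLV; the payload itself
 * follows the header in the command buffer.
 */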
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

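/* Fill the default resource config advertised to QCN9274 firmware in the
 * WMI init command. Most limits come from the TARGET_* defaults; entries
 * such as num_vdevs, num_peers and beacon_tx_offload_max_vdev scale with
 * the number of radios on the SoC.
 */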
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_tids = ath12k_core_get_max_num_tids(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates that the host supports peer map v3 and unmap v2 */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

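/* A WMI message body is a flat sequence of TLVs:
 *
 *   [__le32 header: tag|len][len bytes of value][__le32 header]...
 *
 * ath12k_wmi_tlv_iter() walks that sequence, validates each header and
 * length against the remaining buffer (and against the policy table
 * above) and invokes @iter with the tag, the length and a pointer to
 * the value.
 */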
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

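/* Convenience wrapper used by most event handlers: allocate a tag-indexed
 * pointer table, run the TLV parser over @skb and return the table. A
 * typical (illustrative) caller looks like:
 *
 *   tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 *   ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
 *   ...
 *   kfree(tb);
 *
 * Note that only the last TLV of a given tag survives in the table, since
 * ath12k_wmi_tlv_iter_parse() overwrites earlier entries.
 */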
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

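/* Sleeping variant of the send path: retry ath12k_wmi_cmd_send_nowait()
 * until HTC tx credits become available, giving up after
 * WMI_SEND_TIMEOUT_HZ. A crash flush in progress short-circuits the wait
 * with -ESHUTDOWN.
 */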
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by fw depends on the actual hw chains
	 * used. For example, for 4x4 capable macphys, the first 4 chains can be
	 * used for the first mac and the remaining 4 chains for the second mac,
	 * or vice-versa. In this case, tx/rx chainmask 0xf will be advertised
	 * for the first mac and 0xf0 for the second mac, or vice-versa. Compute
	 * the shift value for the tx/rx chainmask which will be used to
	 * advertise supported ht/vht rates to mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in the
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word, i.e. service bit j lives in bit (j % 4) of word (j / 4).
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

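/* For single-pdev devices a management frame sent off-channel during a
 * remain-on-channel operation must carry the RoC frequency so that
 * firmware transmits it on the right channel; in all other cases the
 * frequency is left as 0.
 */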
static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

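/* All WMI command buffers are rounded up to a 4-byte multiple, zeroed and
 * allocated with WMI_SKB_HEADROOM of headroom so that
 * ath12k_wmi_cmd_send_nowait() can push the wmi_cmd_hdr without
 * reallocating.
 */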
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

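/* Queue a management frame for transmission. The frame is referenced by
 * DMA address (paddr_lo/hi) and full length, with only the first
 * WMI_MGMT_SEND_DOWNLD_LEN bytes additionally copied inline as a
 * byte-array TLV; completion for @buf_id is reported via the
 * WMI_TAG_MGMT_TX_COMPL_EVENT handled elsewhere in this file.
 */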
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*cmd));

	cmd->stats_id = cpu_to_le32(stats_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(skb);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return ret;
}

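/* Create a vdev. The command carries the fixed struct followed by an
 * array of two ath12k_wmi_vdev_txrx_streams_params entries (2 GHz and
 * 5 GHz) and, for ML vdevs with a valid MLD address, an additional
 * MLO-params TLV holding the MLD MAC address.
 */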
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

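/* Translate the host channel description in wmi_vdev_start_req_arg into
 * the ath12k_wmi_channel_params TLV: frequencies, phy mode, the
 * WMI_CHAN_INFO_* flag bits and the regulatory power limits.
 */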
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	else
		chan->band_center_freq2 = 0;

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

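/* Start (or restart) a vdev on a channel. The MLO params and the partner
 * link array are appended only on a fresh start with arg->ml.enabled;
 * the SSID and the hidden-SSID/PMF flags are likewise skipped on
 * restart.
 */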
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(params->vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(params->aid);

	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

	if (params->tx_bssid) {
		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   params->vdev_id, params->aid, params->bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;
	struct wmi_peer_create_mlo_params *ml_param;
	void *ptr;
	struct wmi_tlv *tlv;

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id,  peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

1500 int
1501 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1502 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1503 {
1504 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1505 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1506 	struct sk_buff *skb;
1507 	int ret;
1508 
1509 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1510 	if (!skb)
1511 		return -ENOMEM;
1512 
1513 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1514 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1515 						 sizeof(*cmd));
1516 
1517 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1518 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1519 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1520 
1521 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1522 		   "WMI peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map 0x%x\n",
1523 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1524 
1525 	ret = ath12k_wmi_cmd_send(wmi, skb,
1526 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1527 	if (ret) {
1528 		ath12k_warn(ar->ab,
1529 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID\n");
1530 		dev_kfree_skb(skb);
1531 	}
1532 
1533 	return ret;
1534 }
1535 
1536 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1537 			      u32 param_value, u8 pdev_id)
1538 {
1539 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1540 	struct wmi_pdev_set_param_cmd *cmd;
1541 	struct sk_buff *skb;
1542 	int ret;
1543 
1544 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1545 	if (!skb)
1546 		return -ENOMEM;
1547 
1548 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1549 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1550 						 sizeof(*cmd));
1551 	cmd->pdev_id = cpu_to_le32(pdev_id);
1552 	cmd->param_id = cpu_to_le32(param_id);
1553 	cmd->param_value = cpu_to_le32(param_value);
1554 
1555 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1556 		   "WMI pdev set param %d pdev id %d value %d\n",
1557 		   param_id, pdev_id, param_value);
1558 
1559 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1560 	if (ret) {
1561 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1562 		dev_kfree_skb(skb);
1563 	}
1564 
1565 	return ret;
1566 }
1567 
1568 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1569 {
1570 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1571 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1572 	struct sk_buff *skb;
1573 	int ret;
1574 
1575 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1576 	if (!skb)
1577 		return -ENOMEM;
1578 
1579 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1580 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1581 						 sizeof(*cmd));
1582 	cmd->vdev_id = cpu_to_le32(vdev_id);
1583 	cmd->sta_ps_mode = cpu_to_le32(enable);
1584 
1585 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1586 		   "WMI vdev set psmode %d vdev id %d\n",
1587 		   enable, vdev_id);
1588 
1589 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1590 	if (ret) {
1591 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1592 		dev_kfree_skb(skb);
1593 	}
1594 
1595 	return ret;
1596 }
1597 
1598 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1599 			    u32 pdev_id)
1600 {
1601 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1602 	struct wmi_pdev_suspend_cmd *cmd;
1603 	struct sk_buff *skb;
1604 	int ret;
1605 
1606 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1607 	if (!skb)
1608 		return -ENOMEM;
1609 
1610 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1611 
1612 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1613 						 sizeof(*cmd));
1614 
1615 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1616 	cmd->pdev_id = cpu_to_le32(pdev_id);
1617 
1618 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1619 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1620 
1621 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1622 	if (ret) {
1623 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1624 		dev_kfree_skb(skb);
1625 	}
1626 
1627 	return ret;
1628 }
1629 
1630 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1631 {
1632 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1633 	struct wmi_pdev_resume_cmd *cmd;
1634 	struct sk_buff *skb;
1635 	int ret;
1636 
1637 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1638 	if (!skb)
1639 		return -ENOMEM;
1640 
1641 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1642 
1643 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1644 						 sizeof(*cmd));
1645 	cmd->pdev_id = cpu_to_le32(pdev_id);
1646 
1647 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1648 		   "WMI pdev resume pdev id %d\n", pdev_id);
1649 
1650 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1651 	if (ret) {
1652 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1653 		dev_kfree_skb(skb);
1654 	}
1655 
1656 	return ret;
1657 }
1658 
1659 /* TODO FW Support for the cmd is not available yet.
1660  * Can be tested once the command and corresponding
1661  * event is implemented in FW
1662  */
1663 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1664 					  enum wmi_bss_chan_info_req_type type)
1665 {
1666 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1667 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1668 	struct sk_buff *skb;
1669 	int ret;
1670 
1671 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1672 	if (!skb)
1673 		return -ENOMEM;
1674 
1675 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1676 
1677 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1678 						 sizeof(*cmd));
1679 	cmd->req_type = cpu_to_le32(type);
1680 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1681 
1682 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1683 		   "WMI bss chan info req type %d\n", type);
1684 
1685 	ret = ath12k_wmi_cmd_send(wmi, skb,
1686 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1687 	if (ret) {
1688 		ath12k_warn(ar->ab,
1689 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1690 		dev_kfree_skb(skb);
1691 	}
1692 
1693 	return ret;
1694 }
1695 
1696 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1697 					struct ath12k_wmi_ap_ps_arg *arg)
1698 {
1699 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1700 	struct wmi_ap_ps_peer_cmd *cmd;
1701 	struct sk_buff *skb;
1702 	int ret;
1703 
1704 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1705 	if (!skb)
1706 		return -ENOMEM;
1707 
1708 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1709 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1710 						 sizeof(*cmd));
1711 
1712 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1713 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1714 	cmd->param = cpu_to_le32(arg->param);
1715 	cmd->value = cpu_to_le32(arg->value);
1716 
1717 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1718 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1719 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1720 
1721 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1722 	if (ret) {
1723 		ath12k_warn(ar->ab,
1724 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1725 		dev_kfree_skb(skb);
1726 	}
1727 
1728 	return ret;
1729 }
1730 
1731 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1732 				u32 param, u32 param_value)
1733 {
1734 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1735 	struct wmi_sta_powersave_param_cmd *cmd;
1736 	struct sk_buff *skb;
1737 	int ret;
1738 
1739 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1740 	if (!skb)
1741 		return -ENOMEM;
1742 
1743 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1744 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1745 						 sizeof(*cmd));
1746 
1747 	cmd->vdev_id = cpu_to_le32(vdev_id);
1748 	cmd->param = cpu_to_le32(param);
1749 	cmd->value = cpu_to_le32(param_value);
1750 
1751 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1752 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1753 		   vdev_id, param, param_value);
1754 
1755 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1756 	if (ret) {
1757 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1758 		dev_kfree_skb(skb);
1759 	}
1760 
1761 	return ret;
1762 }
1763 
1764 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1765 {
1766 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1767 	struct wmi_force_fw_hang_cmd *cmd;
1768 	struct sk_buff *skb;
1769 	int ret, len;
1770 
1771 	len = sizeof(*cmd);
1772 
1773 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1774 	if (!skb)
1775 		return -ENOMEM;
1776 
1777 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1778 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1779 						 len);
1780 
1781 	cmd->type = cpu_to_le32(type);
1782 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1783 
1784 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1785 
1786 	if (ret) {
1787 		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1788 		dev_kfree_skb(skb);
1789 	}
1790 	return ret;
1791 }
1792 
1793 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1794 				  u32 param_id, u32 param_value)
1795 {
1796 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1797 	struct wmi_vdev_set_param_cmd *cmd;
1798 	struct sk_buff *skb;
1799 	int ret;
1800 
1801 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1802 	if (!skb)
1803 		return -ENOMEM;
1804 
1805 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1806 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1807 						 sizeof(*cmd));
1808 
1809 	cmd->vdev_id = cpu_to_le32(vdev_id);
1810 	cmd->param_id = cpu_to_le32(param_id);
1811 	cmd->param_value = cpu_to_le32(param_value);
1812 
1813 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1814 		   "WMI vdev id 0x%x set param %d value %d\n",
1815 		   vdev_id, param_id, param_value);
1816 
1817 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1818 	if (ret) {
1819 		ath12k_warn(ar->ab,
1820 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1821 		dev_kfree_skb(skb);
1822 	}
1823 
1824 	return ret;
1825 }
1826 
1827 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1828 {
1829 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1830 	struct wmi_get_pdev_temperature_cmd *cmd;
1831 	struct sk_buff *skb;
1832 	int ret;
1833 
1834 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1835 	if (!skb)
1836 		return -ENOMEM;
1837 
1838 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1839 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1840 						 sizeof(*cmd));
1841 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1842 
1843 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1844 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1845 
1846 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1847 	if (ret) {
1848 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1849 		dev_kfree_skb(skb);
1850 	}
1851 
1852 	return ret;
1853 }
1854 
1855 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1856 					    u32 vdev_id, u32 bcn_ctrl_op)
1857 {
1858 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1859 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1860 	struct sk_buff *skb;
1861 	int ret;
1862 
1863 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1864 	if (!skb)
1865 		return -ENOMEM;
1866 
1867 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1868 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1869 						 sizeof(*cmd));
1870 
1871 	cmd->vdev_id = cpu_to_le32(vdev_id);
1872 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1873 
1874 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1875 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1876 		   vdev_id, bcn_ctrl_op);
1877 
1878 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1879 	if (ret) {
1880 		ath12k_warn(ar->ab,
1881 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1882 		dev_kfree_skb(skb);
1883 	}
1884 
1885 	return ret;
1886 }
1887 
1888 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1889 			     const u8 *p2p_ie)
1890 {
1891 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1892 	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1893 	size_t p2p_ie_len, aligned_len;
1894 	struct wmi_tlv *tlv;
1895 	struct sk_buff *skb;
1896 	void *ptr;
1897 	int ret, len;
1898 
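	/* Total IE length is the 2-byte element header plus the payload
	 * length carried in p2p_ie[1].
	 */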
1899 	p2p_ie_len = p2p_ie[1] + 2;
1900 	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1901 
1902 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1903 
1904 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1905 	if (!skb)
1906 		return -ENOMEM;
1907 
1908 	ptr = skb->data;
1909 	cmd = ptr;
1910 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1911 						 sizeof(*cmd));
1912 	cmd->vdev_id = cpu_to_le32(vdev_id);
1913 	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1914 
1915 	ptr += sizeof(*cmd);
1916 	tlv = ptr;
1917 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
1918 					 aligned_len);
1919 	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1920 
1921 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1922 	if (ret) {
1923 		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1924 		dev_kfree_skb(skb);
1925 	}
1926 
1927 	return ret;
1928 }
1929 
1930 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
1931 			struct ieee80211_mutable_offsets *offs,
1932 			struct sk_buff *bcn,
1933 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1934 {
1935 	struct ath12k *ar = arvif->ar;
1936 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1937 	struct ath12k_base *ab = ar->ab;
1938 	struct wmi_bcn_tmpl_cmd *cmd;
1939 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1940 	struct ath12k_vif *ahvif = arvif->ahvif;
1941 	struct ieee80211_bss_conf *conf;
1942 	u32 vdev_id = arvif->vdev_id;
1943 	struct wmi_tlv *tlv;
1944 	struct sk_buff *skb;
1945 	u32 ema_params = 0;
1946 	void *ptr;
1947 	int ret, len;
1948 	size_t aligned_len = roundup(bcn->len, 4);
1949 
1950 	conf = ath12k_mac_get_link_bss_conf(arvif);
1951 	if (!conf) {
1952 		ath12k_warn(ab,
1953 			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
1954 			    ahvif->vif->addr, arvif->link_id);
1955 		return -EINVAL;
1956 	}
1957 
1958 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1959 
1960 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1961 	if (!skb)
1962 		return -ENOMEM;
1963 
1964 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1965 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1966 						 sizeof(*cmd));
1967 	cmd->vdev_id = cpu_to_le32(vdev_id);
1968 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1969 
1970 	if (conf->csa_active) {
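	/* While CSA is in progress, pass the countdown counter offsets so
	 * firmware can update the counters in the transmitted beacons.
	 */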
1971 		cmd->csa_switch_count_offset =
1972 				cpu_to_le32(offs->cntdwn_counter_offs[0]);
1973 		cmd->ext_csa_switch_count_offset =
1974 				cpu_to_le32(offs->cntdwn_counter_offs[1]);
1975 		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
1976 		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
1977 	}
1978 
1979 	cmd->buf_len = cpu_to_le32(bcn->len);
1980 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
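	/* For EMA beacons, pack the total template count and this template's
	 * index, and flag the first and last templates in the set.
	 */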
1981 	if (ema_args) {
1982 		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
1983 		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
1984 		if (ema_args->bcn_index == 0)
1985 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
1986 		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
1987 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
1988 		cmd->ema_params = cpu_to_le32(ema_params);
1989 	}
1990 
1991 	ptr = skb->data + sizeof(*cmd);
1992 
1993 	bcn_prb_info = ptr;
1994 	len = sizeof(*bcn_prb_info);
1995 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1996 							  len);
1997 	bcn_prb_info->caps = 0;
1998 	bcn_prb_info->erp = 0;
1999 
2000 	ptr += sizeof(*bcn_prb_info);
2001 
2002 	tlv = ptr;
2003 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2004 	memcpy(tlv->value, bcn->data, bcn->len);
2005 
2006 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
2007 	if (ret) {
2008 		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
2009 		dev_kfree_skb(skb);
2010 	}
2011 
2012 	return ret;
2013 }
2014 
2015 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
2016 				struct wmi_vdev_install_key_arg *arg)
2017 {
2018 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2019 	struct wmi_vdev_install_key_cmd *cmd;
2020 	struct wmi_tlv *tlv;
2021 	struct sk_buff *skb;
2022 	int ret, len, key_len_aligned;
2023 
2024 	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
2025 	 * length is specified in cmd->key_len.
2026 	 */
2027 	key_len_aligned = roundup(arg->key_len, 4);
2028 
2029 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
2030 
2031 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2032 	if (!skb)
2033 		return -ENOMEM;
2034 
2035 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
2036 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
2037 						 sizeof(*cmd));
2038 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2039 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2040 	cmd->key_idx = cpu_to_le32(arg->key_idx);
2041 	cmd->key_flags = cpu_to_le32(arg->key_flags);
2042 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
2043 	cmd->key_len = cpu_to_le32(arg->key_len);
2044 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
2045 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
2046 
2047 	if (arg->key_rsc_counter)
2048 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
2049 
2050 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2051 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
2052 	memcpy(tlv->value, arg->key_data, arg->key_len);
2053 
2054 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2055 		   "WMI vdev install key idx %d cipher %d len %d\n",
2056 		   arg->key_idx, arg->key_cipher, arg->key_len);
2057 
2058 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
2059 	if (ret) {
2060 		ath12k_warn(ar->ab,
2061 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
2062 		dev_kfree_skb(skb);
2063 	}
2064 
2065 	return ret;
2066 }
2067 
2068 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
2069 				       struct ath12k_wmi_peer_assoc_arg *arg,
2070 				       bool hw_crypto_disabled)
2071 {
2072 	cmd->peer_flags = 0;
2073 	cmd->peer_flags_ext = 0;
2074 
2075 	if (arg->is_wme_set) {
2076 		if (arg->qos_flag)
2077 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
2078 		if (arg->apsd_flag)
2079 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
2080 		if (arg->ht_flag)
2081 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
2082 		if (arg->bw_40)
2083 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
2084 		if (arg->bw_80)
2085 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
2086 		if (arg->bw_160)
2087 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
2088 		if (arg->bw_320)
2089 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
2090 
2091 		/* Typically if STBC is enabled for VHT it should be enabled
2092 		 * for HT as well.
2093 		 */
2094 		if (arg->stbc_flag)
2095 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
2096 
2097 		/* Typically if LDPC is enabled for VHT it should be enabled
2098 		 * for HT as well.
2099 		 */
2100 		if (arg->ldpc_flag)
2101 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
2102 
2103 		if (arg->static_mimops_flag)
2104 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
2105 		if (arg->dynamic_mimops_flag)
2106 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
2107 		if (arg->spatial_mux_flag)
2108 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
2109 		if (arg->vht_flag)
2110 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
2111 		if (arg->he_flag)
2112 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
2113 		if (arg->twt_requester)
2114 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
2115 		if (arg->twt_responder)
2116 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
2117 		if (arg->eht_flag)
2118 			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
2119 	}
2120 
2121 	/* Suppress authorization for all AUTH modes that need 4-way handshake
2122 	 * (during re-association).
2123 	 * Authorization will be done for these modes on key installation.
2124 	 */
2125 	if (arg->auth_flag)
2126 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
2127 	if (arg->need_ptk_4_way) {
2128 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
2129 		if (!hw_crypto_disabled)
2130 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
2131 	}
2132 	if (arg->need_gtk_2_way)
2133 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
2134 	/* safe mode bypass the 4-way handshake */
2135 	if (arg->safe_mode_enabled)
2136 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
2137 						 WMI_PEER_NEED_GTK_2_WAY));
2138 
2139 	if (arg->is_pmf_enabled)
2140 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
2141 
2142 	/* Disable AMSDU for station transmit, if user configures it.
2143 	 * Disable AMSDU for AP transmit to 11n stations, if user configures
2144 	 * it.
2145 	 * if (arg->amsdu_disable) - add once FW support is available.
2146 	 */
2147 
2148 	/* Target asserts if node is marked HT and all MCS rates are set
2149 	 * to 0. Mark the node as non-HT if all the MCS rates are disabled
2150 	 * through iwpriv.
2151 	 */
2152 	if (arg->peer_ht_rates.num_rates == 0)
2153 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2154 }
2155 
2156 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2157 				   struct ath12k_wmi_peer_assoc_arg *arg)
2158 {
2159 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2160 	struct wmi_peer_assoc_complete_cmd *cmd;
2161 	struct ath12k_wmi_vht_rate_set_params *mcs;
2162 	struct ath12k_wmi_he_rate_set_params *he_mcs;
2163 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2164 	struct wmi_peer_assoc_mlo_params *ml_params;
2165 	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
2166 	struct sk_buff *skb;
2167 	struct wmi_tlv *tlv;
2168 	void *ptr;
2169 	u32 peer_legacy_rates_align;
2170 	u32 peer_ht_rates_align;
2171 	int i, ret, len;
2172 	__le32 v;
2173 
2174 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2175 					  sizeof(u32));
2176 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2177 				      sizeof(u32));
2178 
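	/* Message layout: fixed cmd followed by TLVs for legacy rates,
	 * HT rates, the VHT rate set, the HE rate set array, optional MLO
	 * params, the EHT rate set array and optional MLO partner links.
	 */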
2179 	len = sizeof(*cmd) +
2180 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2181 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2182 	      sizeof(*mcs) + TLV_HDR_SIZE +
2183 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2184 	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
2185 
2186 	if (arg->ml.enabled)
2187 		len += TLV_HDR_SIZE + sizeof(*ml_params) +
2188 		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
2189 	else
2190 		len += (2 * TLV_HDR_SIZE);
2191 
2192 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2193 	if (!skb)
2194 		return -ENOMEM;
2195 
2196 	ptr = skb->data;
2197 
2198 	cmd = ptr;
2199 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2200 						 sizeof(*cmd));
2201 
2202 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2203 
2204 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2205 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2206 	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2207 
2208 	ath12k_wmi_copy_peer_flags(cmd, arg,
2209 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2210 					    &ar->ab->dev_flags));
2211 
2212 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2213 
2214 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2215 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2216 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2217 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2218 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2219 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2220 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2221 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2222 
2223 	/* Update 11ax capabilities */
2224 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2225 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2226 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2227 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2228 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2229 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2230 		cmd->peer_he_cap_phy[i] =
2231 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2232 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2233 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2234 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2235 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2236 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2237 
2238 	/* Update 11be capabilities */
2239 	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2240 		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2241 		       0);
2242 	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2243 		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2244 		       0);
2245 	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2246 		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2247 
2248 	/* Update peer legacy rate information */
2249 	ptr += sizeof(*cmd);
2250 
2251 	tlv = ptr;
2252 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2253 
2254 	ptr += TLV_HDR_SIZE;
2255 
2256 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2257 	memcpy(ptr, arg->peer_legacy_rates.rates,
2258 	       arg->peer_legacy_rates.num_rates);
2259 
2260 	/* Update peer HT rate information */
2261 	ptr += peer_legacy_rates_align;
2262 
2263 	tlv = ptr;
2264 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2265 	ptr += TLV_HDR_SIZE;
2266 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2267 	memcpy(ptr, arg->peer_ht_rates.rates,
2268 	       arg->peer_ht_rates.num_rates);
2269 
2270 	/* VHT Rates */
2271 	ptr += peer_ht_rates_align;
2272 
2273 	mcs = ptr;
2274 
2275 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2276 						 sizeof(*mcs));
2277 
2278 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2279 
2280 	/* Update bandwidth-NSS mapping */
2281 	cmd->peer_bw_rxnss_override = 0;
2282 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2283 
2284 	if (arg->vht_capable) {
2285 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2286 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2287 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2288 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2289 	}
2290 
2291 	/* HE Rates */
2292 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2293 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2294 
2295 	ptr += sizeof(*mcs);
2296 
2297 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2298 
2299 	tlv = ptr;
2300 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2301 	ptr += TLV_HDR_SIZE;
2302 
2303 	/* Loop through the HE rate set */
2304 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2305 		he_mcs = ptr;
2306 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2307 							    sizeof(*he_mcs));
2308 
2309 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2310 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2311 		ptr += sizeof(*he_mcs);
2312 	}
2313 
2314 	tlv = ptr;
2315 	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
2316 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2317 	ptr += TLV_HDR_SIZE;
2318 	if (!len)
2319 		goto skip_ml_params;
2320 
2321 	ml_params = ptr;
2322 	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
2323 						       len);
2324 	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2325 
2326 	if (arg->ml.assoc_link)
2327 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2328 
2329 	if (arg->ml.primary_umac)
2330 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2331 
2332 	if (arg->ml.logical_link_idx_valid)
2333 		ml_params->flags |=
2334 			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
2335 
2336 	if (arg->ml.peer_id_valid)
2337 		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
2338 
2339 	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
2340 	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
2341 	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
2342 	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
2343 	ptr += sizeof(*ml_params);
2344 
2345 skip_ml_params:
2346 	/* Loop through the EHT rate set */
2347 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2348 	tlv = ptr;
2349 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2350 	ptr += TLV_HDR_SIZE;
2351 
2352 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2353 		eht_mcs = ptr;
2354 		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2355 							     sizeof(*eht_mcs));
2356 
2357 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2358 		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2359 		ptr += sizeof(*eht_mcs);
2360 	}
2361 
2362 	tlv = ptr;
2363 	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
2364 	/* fill ML Partner links */
2365 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2366 	ptr += TLV_HDR_SIZE;
2367 
2368 	if (len == 0)
2369 		goto send;
2370 
2371 	for (i = 0; i < arg->ml.num_partner_links; i++) {
2372 		u32 tag = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
2373 
2374 		partner_info = ptr;
2375 		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(tag,
2376 								  sizeof(*partner_info));
2377 		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
2378 		partner_info->hw_link_id =
2379 			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
2380 		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
2381 
2382 		if (arg->ml.partner_info[i].assoc_link)
2383 			partner_info->flags |=
2384 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
2385 
2386 		if (arg->ml.partner_info[i].primary_umac)
2387 			partner_info->flags |=
2388 				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
2389 
2390 		if (arg->ml.partner_info[i].logical_link_idx_valid) {
2391 			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
2392 			partner_info->flags |= v;
2393 		}
2394 
2395 		partner_info->logical_link_idx =
2396 			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
2397 		ptr += sizeof(*partner_info);
2398 	}
2399 
2400 send:
2401 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2402 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2403 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2404 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2405 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2406 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2407 		   cmd->peer_mpdu_density,
2408 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2409 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2410 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2411 		   cmd->peer_he_cap_phy[2],
2412 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2413 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2414 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2415 		   cmd->peer_eht_cap_phy[2]);
2416 
2417 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2418 	if (ret) {
2419 		ath12k_warn(ar->ab,
2420 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2421 		dev_kfree_skb(skb);
2422 	}
2423 
2424 	return ret;
2425 }
2426 
2427 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2428 				struct ath12k_wmi_scan_req_arg *arg)
2429 {
2430 	/* setup commonly used values */
2431 	arg->scan_req_id = 1;
2432 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2433 	arg->dwell_time_active = 50;
2434 	arg->dwell_time_active_2g = 0;
2435 	arg->dwell_time_passive = 150;
2436 	arg->dwell_time_active_6g = 70;
2437 	arg->dwell_time_passive_6g = 70;
2438 	arg->min_rest_time = 50;
2439 	arg->max_rest_time = 500;
2440 	arg->repeat_probe_time = 0;
2441 	arg->probe_spacing_time = 0;
2442 	arg->idle_time = 0;
2443 	arg->max_scan_time = 20000;
2444 	arg->probe_delay = 5;
2445 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2446 				  WMI_SCAN_EVENT_COMPLETED |
2447 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2448 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2449 				  WMI_SCAN_EVENT_DEQUEUED;
2450 	arg->scan_f_chan_stat_evnt = 1;
2451 	arg->num_bssid = 1;
2452 
2453 	/* fill bssid_list[0] with the broadcast address, otherwise the
2454 	 * BSSID and RA in probe requests would be all zeros
2455 	 */
2456 	eth_broadcast_addr(arg->bssid_list[0].addr);
2457 }
2458 
2459 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2460 						   struct ath12k_wmi_scan_req_arg *arg)
2461 {
2462 	/* Scan events subscription */
2463 	if (arg->scan_ev_started)
2464 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2465 	if (arg->scan_ev_completed)
2466 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2467 	if (arg->scan_ev_bss_chan)
2468 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2469 	if (arg->scan_ev_foreign_chan)
2470 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2471 	if (arg->scan_ev_dequeued)
2472 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2473 	if (arg->scan_ev_preempted)
2474 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2475 	if (arg->scan_ev_start_failed)
2476 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2477 	if (arg->scan_ev_restarted)
2478 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2479 	if (arg->scan_ev_foreign_chn_exit)
2480 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2481 	if (arg->scan_ev_suspended)
2482 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2483 	if (arg->scan_ev_resumed)
2484 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2485 
2486 	/* Set scan control flags */
2487 	cmd->scan_ctrl_flags = 0;
2488 	if (arg->scan_f_passive)
2489 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2490 	if (arg->scan_f_strict_passive_pch)
2491 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2492 	if (arg->scan_f_promisc_mode)
2493 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2494 	if (arg->scan_f_capture_phy_err)
2495 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2496 	if (arg->scan_f_half_rate)
2497 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2498 	if (arg->scan_f_quarter_rate)
2499 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2500 	if (arg->scan_f_cck_rates)
2501 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2502 	if (arg->scan_f_ofdm_rates)
2503 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2504 	if (arg->scan_f_chan_stat_evnt)
2505 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2506 	if (arg->scan_f_filter_prb_req)
2507 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2508 	if (arg->scan_f_bcast_probe)
2509 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2510 	if (arg->scan_f_offchan_mgmt_tx)
2511 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2512 	if (arg->scan_f_offchan_data_tx)
2513 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2514 	if (arg->scan_f_force_active_dfs_chn)
2515 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2516 	if (arg->scan_f_add_tpc_ie_in_probe)
2517 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2518 	if (arg->scan_f_add_ds_ie_in_probe)
2519 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2520 	if (arg->scan_f_add_spoofed_mac_in_probe)
2521 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2522 	if (arg->scan_f_add_rand_seq_in_probe)
2523 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2524 	if (arg->scan_f_en_ie_whitelist_in_probe)
2525 		cmd->scan_ctrl_flags |=
2526 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2527 
2528 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2529 						 WMI_SCAN_DWELL_MODE_MASK);
2530 }
2531 
2532 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2533 				   struct ath12k_wmi_scan_req_arg *arg)
2534 {
2535 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2536 	struct wmi_start_scan_cmd *cmd;
2537 	struct ath12k_wmi_ssid_params *ssid = NULL;
2538 	struct ath12k_wmi_mac_addr_params *bssid;
2539 	struct sk_buff *skb;
2540 	struct wmi_tlv *tlv;
2541 	void *ptr;
2542 	int i, ret, len;
2543 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2544 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2545 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2546 
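	/* Account for the fixed cmd plus variable-length TLVs carrying the
	 * channel list, SSIDs, BSSIDs, extra IEs and optional scan hints.
	 */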
2547 	len = sizeof(*cmd);
2548 
2549 	len += TLV_HDR_SIZE;
2550 	if (arg->num_chan)
2551 		len += arg->num_chan * sizeof(u32);
2552 
2553 	len += TLV_HDR_SIZE;
2554 	if (arg->num_ssids)
2555 		len += arg->num_ssids * sizeof(*ssid);
2556 
2557 	len += TLV_HDR_SIZE;
2558 	if (arg->num_bssid)
2559 		len += sizeof(*bssid) * arg->num_bssid;
2560 
2561 	if (arg->num_hint_bssid)
2562 		len += TLV_HDR_SIZE +
2563 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2564 
2565 	if (arg->num_hint_s_ssid)
2566 		len += TLV_HDR_SIZE +
2567 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2568 
2569 	len += TLV_HDR_SIZE;
2570 	if (arg->extraie.len)
2571 		extraie_len_with_pad =
2572 			roundup(arg->extraie.len, sizeof(u32));
2573 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2574 		len += extraie_len_with_pad;
2575 	} else {
2576 		ath12k_warn(ar->ab, "discarding oversized extraie of %d bytes for scan start\n",
2577 			    arg->extraie.len);
2578 		extraie_len_with_pad = 0;
2579 	}
2580 
2581 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2582 	if (!skb)
2583 		return -ENOMEM;
2584 
2585 	ptr = skb->data;
2586 
2587 	cmd = ptr;
2588 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2589 						 sizeof(*cmd));
2590 
2591 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2592 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2593 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2594 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2595 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2596 
2597 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2598 
2599 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2600 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2601 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2602 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2603 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2604 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2605 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2606 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2607 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2608 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2609 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2610 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2611 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2612 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2613 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2614 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2615 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2616 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2617 
2618 	ptr += sizeof(*cmd);
2619 
2620 	len = arg->num_chan * sizeof(u32);
2621 
2622 	tlv = ptr;
2623 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2624 	ptr += TLV_HDR_SIZE;
2625 	tmp_ptr = (u32 *)ptr;
2626 
2627 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
2628 
2629 	ptr += len;
2630 
2631 	len = arg->num_ssids * sizeof(*ssid);
2632 	tlv = ptr;
2633 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2634 
2635 	ptr += TLV_HDR_SIZE;
2636 
2637 	if (arg->num_ssids) {
2638 		ssid = ptr;
2639 		for (i = 0; i < arg->num_ssids; ++i) {
2640 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2641 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2642 			       arg->ssid[i].ssid_len);
2643 			ssid++;
2644 		}
2645 	}
2646 
2647 	ptr += (arg->num_ssids * sizeof(*ssid));
2648 	len = arg->num_bssid * sizeof(*bssid);
2649 	tlv = ptr;
2650 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2651 
2652 	ptr += TLV_HDR_SIZE;
2653 	bssid = ptr;
2654 
2655 	if (arg->num_bssid) {
2656 		for (i = 0; i < arg->num_bssid; ++i) {
2657 			ether_addr_copy(bssid->addr,
2658 					arg->bssid_list[i].addr);
2659 			bssid++;
2660 		}
2661 	}
2662 
2663 	ptr += arg->num_bssid * sizeof(*bssid);
2664 
2665 	len = extraie_len_with_pad;
2666 	tlv = ptr;
2667 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2668 	ptr += TLV_HDR_SIZE;
2669 
2670 	if (extraie_len_with_pad)
2671 		memcpy(ptr, arg->extraie.ptr,
2672 		       arg->extraie.len);
2673 
2674 	ptr += extraie_len_with_pad;
2675 
2676 	if (arg->num_hint_s_ssid) {
2677 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2678 		tlv = ptr;
2679 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2680 		ptr += TLV_HDR_SIZE;
2681 		s_ssid = ptr;
2682 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2683 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2684 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2685 			s_ssid++;
2686 		}
2687 		ptr += len;
2688 	}
2689 
2690 	if (arg->num_hint_bssid) {
2691 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2692 		tlv = ptr;
2693 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2694 		ptr += TLV_HDR_SIZE;
2695 		hint_bssid = ptr;
2696 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2697 			hint_bssid->freq_flags =
2698 				arg->hint_bssid[i].freq_flags;
2699 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2700 					&arg->hint_bssid[i].bssid.addr[0]);
2701 			hint_bssid++;
2702 		}
2703 	}
2704 
2705 	ret = ath12k_wmi_cmd_send(wmi, skb,
2706 				  WMI_START_SCAN_CMDID);
2707 	if (ret) {
2708 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2709 		dev_kfree_skb(skb);
2710 	}
2711 
2712 	return ret;
2713 }
2714 
2715 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2716 				  struct ath12k_wmi_scan_cancel_arg *arg)
2717 {
2718 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2719 	struct wmi_stop_scan_cmd *cmd;
2720 	struct sk_buff *skb;
2721 	int ret;
2722 
2723 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2724 	if (!skb)
2725 		return -ENOMEM;
2726 
2727 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2728 
2729 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2730 						 sizeof(*cmd));
2731 
2732 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2733 	cmd->requestor = cpu_to_le32(arg->requester);
2734 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2735 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2736 	/* stop the scan with the corresponding scan_id */
2737 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2738 		/* Cancelling all scans */
2739 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2740 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2741 		/* Cancelling VAP scans */
2742 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2743 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2744 		/* Cancelling specific scan */
2745 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2746 	} else {
2747 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2748 			    arg->req_type);
2749 		dev_kfree_skb(skb);
2750 		return -EINVAL;
2751 	}
2752 
2753 	ret = ath12k_wmi_cmd_send(wmi, skb,
2754 				  WMI_STOP_SCAN_CMDID);
2755 	if (ret) {
2756 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2757 		dev_kfree_skb(skb);
2758 	}
2759 
2760 	return ret;
2761 }
2762 
2763 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2764 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2765 {
2766 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2767 	struct wmi_scan_chan_list_cmd *cmd;
2768 	struct sk_buff *skb;
2769 	struct ath12k_wmi_channel_params *chan_info;
2770 	struct ath12k_wmi_channel_arg *channel_arg;
2771 	struct wmi_tlv *tlv;
2772 	void *ptr;
2773 	int i, ret, len;
2774 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2775 	__le32 *reg1, *reg2;
2776 
2777 	channel_arg = &arg->channel[0];
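	/* Split the channel list across as many WMI messages as needed to
	 * stay within the maximum WMI message length.
	 */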
2778 	while (arg->nallchans) {
2779 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2780 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2781 			sizeof(*chan_info);
2782 
2783 		num_send_chans = min(arg->nallchans, max_chan_limit);
2784 
2785 		arg->nallchans -= num_send_chans;
2786 		len += sizeof(*chan_info) * num_send_chans;
2787 
2788 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2789 		if (!skb)
2790 			return -ENOMEM;
2791 
2792 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2793 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2794 							 sizeof(*cmd));
2795 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2796 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2797 		if (num_sends)
2798 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2799 
2800 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2801 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2802 			   num_send_chans, len, cmd->pdev_id, num_sends);
2803 
2804 		ptr = skb->data + sizeof(*cmd);
2805 
2806 		len = sizeof(*chan_info) * num_send_chans;
2807 		tlv = ptr;
2808 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
2809 						 len);
2810 		ptr += TLV_HDR_SIZE;
2811 
2812 		for (i = 0; i < num_send_chans; ++i) {
2813 			chan_info = ptr;
2814 			memset(chan_info, 0, sizeof(*chan_info));
2815 			len = sizeof(*chan_info);
2816 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2817 								       len);
2818 
2819 			reg1 = &chan_info->reg_info_1;
2820 			reg2 = &chan_info->reg_info_2;
2821 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2822 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2823 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2824 
2825 			if (channel_arg->is_chan_passive)
2826 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2827 			if (channel_arg->allow_he)
2828 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2829 			else if (channel_arg->allow_vht)
2830 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2831 			else if (channel_arg->allow_ht)
2832 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2833 			if (channel_arg->half_rate)
2834 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2835 			if (channel_arg->quarter_rate)
2836 				chan_info->info |=
2837 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2838 
2839 			if (channel_arg->psc_channel)
2840 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2841 
2842 			if (channel_arg->dfs_set)
2843 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2844 
2845 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2846 							    WMI_CHAN_INFO_MODE);
2847 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2848 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2849 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2850 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2851 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2852 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2853 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2854 						  WMI_CHAN_REG_INFO1_REG_CLS);
2855 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2856 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2857 			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
2858 						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
2859 
2860 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2861 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2862 				   i, chan_info->mhz, chan_info->info);
2863 
2864 			ptr += sizeof(*chan_info);
2865 
2866 			channel_arg++;
2867 		}
2868 
2869 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2870 		if (ret) {
2871 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2872 			dev_kfree_skb(skb);
2873 			return ret;
2874 		}
2875 
2876 		num_sends++;
2877 	}
2878 
2879 	return 0;
2880 }
2881 
2882 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2883 				   struct wmi_wmm_params_all_arg *param)
2884 {
2885 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2886 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2887 	struct wmi_wmm_params *wmm_param;
2888 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2889 	struct sk_buff *skb;
2890 	int ret, ac;
2891 
2892 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2893 	if (!skb)
2894 		return -ENOMEM;
2895 
2896 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2897 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2898 						 sizeof(*cmd));
2899 
2900 	cmd->vdev_id = cpu_to_le32(vdev_id);
2901 	cmd->wmm_param_type = 0;
2902 
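	/* fill one set of WMM params per access category (BE, BK, VI, VO) */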
2903 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2904 		switch (ac) {
2905 		case WME_AC_BE:
2906 			wmi_wmm_arg = &param->ac_be;
2907 			break;
2908 		case WME_AC_BK:
2909 			wmi_wmm_arg = &param->ac_bk;
2910 			break;
2911 		case WME_AC_VI:
2912 			wmi_wmm_arg = &param->ac_vi;
2913 			break;
2914 		case WME_AC_VO:
2915 			wmi_wmm_arg = &param->ac_vo;
2916 			break;
2917 		}
2918 
2919 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2920 		wmm_param->tlv_header =
2921 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2922 					       sizeof(*wmm_param));
2923 
2924 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2925 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2926 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2927 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2928 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2929 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2930 
2931 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2932 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2933 			   ac, wmm_param->aifs, wmm_param->cwmin,
2934 			   wmm_param->cwmax, wmm_param->txoplimit,
2935 			   wmm_param->acm, wmm_param->no_ack);
2936 	}
2937 	ret = ath12k_wmi_cmd_send(wmi, skb,
2938 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2939 	if (ret) {
2940 		ath12k_warn(ar->ab,
2941 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2942 		dev_kfree_skb(skb);
2943 	}
2944 
2945 	return ret;
2946 }
2947 
2948 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2949 						  u32 pdev_id)
2950 {
2951 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2952 	struct wmi_dfs_phyerr_offload_cmd *cmd;
2953 	struct sk_buff *skb;
2954 	int ret;
2955 
2956 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2957 	if (!skb)
2958 		return -ENOMEM;
2959 
2960 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2961 	cmd->tlv_header =
2962 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2963 				       sizeof(*cmd));
2964 
2965 	cmd->pdev_id = cpu_to_le32(pdev_id);
2966 
2967 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2968 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2969 
2970 	ret = ath12k_wmi_cmd_send(wmi, skb,
2971 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2972 	if (ret) {
2973 		ath12k_warn(ar->ab,
2974 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2975 		dev_kfree_skb(skb);
2976 	}
2977 
2978 	return ret;
2979 }
2980 
2981 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
2982 			    const u8 *buf, size_t buf_len)
2983 {
2984 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2985 	struct wmi_pdev_set_bios_interface_cmd *cmd;
2986 	struct wmi_tlv *tlv;
2987 	struct sk_buff *skb;
2988 	u8 *ptr;
2989 	u32 len, len_aligned;
2990 	int ret;
2991 
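	/* The opaque BIOS parameter blob is carried in a byte-array TLV
	 * padded to a 4-byte boundary.
	 */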
2992 	len_aligned = roundup(buf_len, sizeof(u32));
2993 	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
2994 
2995 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2996 	if (!skb)
2997 		return -ENOMEM;
2998 
2999 	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
3000 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
3001 						 sizeof(*cmd));
3002 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3003 	cmd->param_type_id = cpu_to_le32(param_id);
3004 	cmd->length = cpu_to_le32(buf_len);
3005 
3006 	ptr = skb->data + sizeof(*cmd);
3007 	tlv = (struct wmi_tlv *)ptr;
3008 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
3009 	ptr += TLV_HDR_SIZE;
3010 	memcpy(ptr, buf, buf_len);
3011 
3012 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3013 				  skb,
3014 				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
3015 	if (ret) {
3016 		ath12k_warn(ab,
3017 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
3018 			    param_id, ret);
3019 		dev_kfree_skb(skb);
3020 	}
3021 
3022 	return ret;
3023 }
3024 
3025 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3026 {
3027 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3028 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3029 	struct wmi_tlv *tlv;
3030 	struct sk_buff *skb;
3031 	int ret;
3032 	u8 *buf_ptr;
3033 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3034 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3035 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3036 
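	/* The fixed cmd is followed by two byte-array TLVs: the BIOS SAR
	 * power table and the DBS backoff table, each padded to 4 bytes.
	 */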
3037 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3038 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3039 					      sizeof(u32));
3040 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3041 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3042 
3043 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3044 	if (!skb)
3045 		return -ENOMEM;
3046 
3047 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3048 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3049 						 sizeof(*cmd));
3050 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3051 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3052 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3053 
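	/* The fixed command is followed by two byte-array TLVs: the BIOS
	 * SAR power table and the DBS backoff table, each padded to a
	 * 4-byte boundary.
	 */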
3054 	buf_ptr = skb->data + sizeof(*cmd);
3055 	tlv = (struct wmi_tlv *)buf_ptr;
3056 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3057 					 sar_table_len_aligned);
3058 	buf_ptr += TLV_HDR_SIZE;
3059 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3060 
3061 	buf_ptr += sar_table_len_aligned;
3062 	tlv = (struct wmi_tlv *)buf_ptr;
3063 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3064 					 sar_dbs_backoff_len_aligned);
3065 	buf_ptr += TLV_HDR_SIZE;
3066 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3067 
3068 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3069 				  skb,
3070 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3071 	if (ret) {
3072 		ath12k_warn(ab,
3073 			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
3074 			    ret);
3075 		dev_kfree_skb(skb);
3076 	}
3077 
3078 	return ret;
3079 }
3080 
3081 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3082 {
3083 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3084 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3085 	struct wmi_tlv *tlv;
3086 	struct sk_buff *skb;
3087 	int ret;
3088 	u8 *buf_ptr;
3089 	u32 len, sar_geo_len_aligned;
3090 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3091 
3092 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3093 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3094 
3095 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3096 	if (!skb)
3097 		return -ENOMEM;
3098 
3099 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3100 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3101 						 sizeof(*cmd));
3102 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3103 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3104 
3105 	buf_ptr = skb->data + sizeof(*cmd);
3106 	tlv = (struct wmi_tlv *)buf_ptr;
3107 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3108 	buf_ptr += TLV_HDR_SIZE;
3109 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3110 
3111 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3112 				  skb,
3113 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3114 	if (ret) {
3115 		ath12k_warn(ab,
3116 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3117 			    ret);
3118 		dev_kfree_skb(skb);
3119 	}
3120 
3121 	return ret;
3122 }
3123 
3124 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3125 			  u32 tid, u32 initiator, u32 reason)
3126 {
3127 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3128 	struct wmi_delba_send_cmd *cmd;
3129 	struct sk_buff *skb;
3130 	int ret;
3131 
3132 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3133 	if (!skb)
3134 		return -ENOMEM;
3135 
3136 	cmd = (struct wmi_delba_send_cmd *)skb->data;
3137 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
3138 						 sizeof(*cmd));
3139 	cmd->vdev_id = cpu_to_le32(vdev_id);
3140 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3141 	cmd->tid = cpu_to_le32(tid);
3142 	cmd->initiator = cpu_to_le32(initiator);
3143 	cmd->reasoncode = cpu_to_le32(reason);
3144 
3145 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3146 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
3147 		   vdev_id, mac, tid, initiator, reason);
3148 
3149 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
3150 
3151 	if (ret) {
3152 		ath12k_warn(ar->ab,
3153 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
3154 		dev_kfree_skb(skb);
3155 	}
3156 
3157 	return ret;
3158 }
3159 
3160 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3161 			      u32 tid, u32 status)
3162 {
3163 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3164 	struct wmi_addba_setresponse_cmd *cmd;
3165 	struct sk_buff *skb;
3166 	int ret;
3167 
3168 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3169 	if (!skb)
3170 		return -ENOMEM;
3171 
3172 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
3173 	cmd->tlv_header =
3174 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
3175 				       sizeof(*cmd));
3176 	cmd->vdev_id = cpu_to_le32(vdev_id);
3177 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3178 	cmd->tid = cpu_to_le32(tid);
3179 	cmd->statuscode = cpu_to_le32(status);
3180 
3181 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3182 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
3183 		   vdev_id, mac, tid, status);
3184 
3185 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
3186 
3187 	if (ret) {
3188 		ath12k_warn(ar->ab,
3189 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
3190 		dev_kfree_skb(skb);
3191 	}
3192 
3193 	return ret;
3194 }
3195 
3196 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
3197 			  u32 tid, u32 buf_size)
3198 {
3199 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3200 	struct wmi_addba_send_cmd *cmd;
3201 	struct sk_buff *skb;
3202 	int ret;
3203 
3204 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3205 	if (!skb)
3206 		return -ENOMEM;
3207 
3208 	cmd = (struct wmi_addba_send_cmd *)skb->data;
3209 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
3210 						 sizeof(*cmd));
3211 	cmd->vdev_id = cpu_to_le32(vdev_id);
3212 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3213 	cmd->tid = cpu_to_le32(tid);
3214 	cmd->buffersize = cpu_to_le32(buf_size);
3215 
3216 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3217 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
3218 		   vdev_id, mac, tid, buf_size);
3219 
3220 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
3221 
3222 	if (ret) {
3223 		ath12k_warn(ar->ab,
3224 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
3225 		dev_kfree_skb(skb);
3226 	}
3227 
3228 	return ret;
3229 }
3230 
3231 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
3232 {
3233 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3234 	struct wmi_addba_clear_resp_cmd *cmd;
3235 	struct sk_buff *skb;
3236 	int ret;
3237 
3238 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3239 	if (!skb)
3240 		return -ENOMEM;
3241 
3242 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
3243 	cmd->tlv_header =
3244 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
3245 				       sizeof(*cmd));
3246 	cmd->vdev_id = cpu_to_le32(vdev_id);
3247 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
3248 
3249 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3250 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
3251 		   vdev_id, mac);
3252 
3253 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
3254 
3255 	if (ret) {
3256 		ath12k_warn(ar->ab,
3257 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
3258 		dev_kfree_skb(skb);
3259 	}
3260 
3261 	return ret;
3262 }
3263 
3264 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3265 				     struct ath12k_wmi_init_country_arg *arg)
3266 {
3267 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3268 	struct wmi_init_country_cmd *cmd;
3269 	struct sk_buff *skb;
3270 	int ret;
3271 
3272 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3273 	if (!skb)
3274 		return -ENOMEM;
3275 
3276 	cmd = (struct wmi_init_country_cmd *)skb->data;
3277 	cmd->tlv_header =
3278 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3279 				       sizeof(*cmd));
3280 
3281 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3282 
3283 	switch (arg->flags) {
3284 	case ALPHA_IS_SET:
3285 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
3286 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3287 		break;
3288 	case CC_IS_SET:
3289 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3290 		cmd->cc_info.country_code =
3291 			cpu_to_le32(arg->cc_info.country_code);
3292 		break;
3293 	case REGDMN_IS_SET:
3294 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3295 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3296 		break;
3297 	default:
3298 		ret = -EINVAL;
3299 		goto out;
3300 	}
3301 
3302 	ret = ath12k_wmi_cmd_send(wmi, skb,
3303 				  WMI_SET_INIT_COUNTRY_CMDID);
3304 
3305 out:
3306 	if (ret) {
3307 		ath12k_warn(ar->ab,
3308 			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
3309 			    ret);
3310 		dev_kfree_skb(skb);
3311 	}
3312 
3313 	return ret;
3314 }
3315 
3316 int
3317 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
3318 {
3319 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3320 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3321 	struct wmi_twt_enable_params_cmd *cmd;
3322 	struct sk_buff *skb;
3323 	int ret, len;
3324 
3325 	len = sizeof(*cmd);
3326 
3327 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3328 	if (!skb)
3329 		return -ENOMEM;
3330 
3331 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3332 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
3333 						 len);
3334 	cmd->pdev_id = cpu_to_le32(pdev_id);
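	/* Program the driver's default TWT scheduler tuning (ATH12K_TWT_DEF_*) */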
3335 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
3336 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
3337 	cmd->congestion_thresh_setup =
3338 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
3339 	cmd->congestion_thresh_teardown =
3340 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
3341 	cmd->congestion_thresh_critical =
3342 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
3343 	cmd->interference_thresh_teardown =
3344 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
3345 	cmd->interference_thresh_setup =
3346 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
3347 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
3348 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
3349 	cmd->no_of_bcast_mcast_slots =
3350 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
3351 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
3352 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
3353 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
3354 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
3355 	cmd->remove_sta_slot_interval =
3356 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
3357 	/* TODO add MBSSID support */
3358 	cmd->mbss_support = 0;
3359 
3360 	ret = ath12k_wmi_cmd_send(wmi, skb,
3361 				  WMI_TWT_ENABLE_CMDID);
3362 	if (ret) {
3363 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
3364 		dev_kfree_skb(skb);
3365 	}
3366 	return ret;
3367 }
3368 
3369 int
3370 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3371 {
3372 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3373 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3374 	struct wmi_twt_disable_params_cmd *cmd;
3375 	struct sk_buff *skb;
3376 	int ret, len;
3377 
3378 	len = sizeof(*cmd);
3379 
3380 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3381 	if (!skb)
3382 		return -ENOMEM;
3383 
3384 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3385 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3386 						 len);
3387 	cmd->pdev_id = cpu_to_le32(pdev_id);
3388 
3389 	ret = ath12k_wmi_cmd_send(wmi, skb,
3390 				  WMI_TWT_DISABLE_CMDID);
3391 	if (ret) {
3392 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
3393 		dev_kfree_skb(skb);
3394 	}
3395 	return ret;
3396 }
3397 
3398 int
3399 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3400 			     struct ieee80211_he_obss_pd *he_obss_pd)
3401 {
3402 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3403 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3404 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3405 	struct sk_buff *skb;
3406 	int ret, len;
3407 
3408 	len = sizeof(*cmd);
3409 
3410 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3411 	if (!skb)
3412 		return -ENOMEM;
3413 
3414 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3415 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3416 						 len);
3417 	cmd->vdev_id = cpu_to_le32(vdev_id);
3418 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3419 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3420 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3421 
3422 	ret = ath12k_wmi_cmd_send(wmi, skb,
3423 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3424 	if (ret) {
3425 		ath12k_warn(ab,
3426 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
3427 		dev_kfree_skb(skb);
3428 	}
3429 	return ret;
3430 }
3431 
3432 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
3433 				  u8 bss_color, u32 period,
3434 				  bool enable)
3435 {
3436 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3437 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3438 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3439 	struct sk_buff *skb;
3440 	int ret, len;
3441 
3442 	len = sizeof(*cmd);
3443 
3444 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3445 	if (!skb)
3446 		return -ENOMEM;
3447 
3448 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3449 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
3450 						 len);
3451 	cmd->vdev_id = cpu_to_le32(vdev_id);
3452 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
3453 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
3454 	cmd->current_bss_color = cpu_to_le32(bss_color);
3455 	cmd->detection_period_ms = cpu_to_le32(period);
3456 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
3457 	cmd->free_slot_expiry_time_ms = 0;
3458 	cmd->flags = 0;
3459 
3460 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3461 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3462 		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
3463 		   cmd->detection_period_ms, cmd->scan_period_ms);
3464 
3465 	ret = ath12k_wmi_cmd_send(wmi, skb,
3466 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3467 	if (ret) {
3468 		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
3469 		dev_kfree_skb(skb);
3470 	}
3471 	return ret;
3472 }
3473 
3474 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
3475 						bool enable)
3476 {
3477 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3478 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3479 	struct wmi_bss_color_change_enable_params_cmd *cmd;
3480 	struct sk_buff *skb;
3481 	int ret, len;
3482 
3483 	len = sizeof(*cmd);
3484 
3485 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3486 	if (!skb)
3487 		return -ENOMEM;
3488 
3489 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3490 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
3491 						 len);
3492 	cmd->vdev_id = cpu_to_le32(vdev_id);
3493 	cmd->enable = enable ? cpu_to_le32(1) : 0;
3494 
3495 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3496 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
3497 		   cmd->vdev_id, cmd->enable);
3498 
3499 	ret = ath12k_wmi_cmd_send(wmi, skb,
3500 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3501 	if (ret) {
3502 		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
3503 		dev_kfree_skb(skb);
3504 	}
3505 	return ret;
3506 }
3507 
3508 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
3509 				   struct sk_buff *tmpl)
3510 {
3511 	struct wmi_tlv *tlv;
3512 	struct sk_buff *skb;
3513 	void *ptr;
3514 	int ret, len;
3515 	size_t aligned_len;
3516 	struct wmi_fils_discovery_tmpl_cmd *cmd;
3517 
3518 	aligned_len = roundup(tmpl->len, 4);
3519 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3520 
3521 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3522 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
3523 
3524 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3525 	if (!skb)
3526 		return -ENOMEM;
3527 
3528 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3529 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
3530 						 sizeof(*cmd));
3531 	cmd->vdev_id = cpu_to_le32(vdev_id);
3532 	cmd->buf_len = cpu_to_le32(tmpl->len);
3533 	ptr = skb->data + sizeof(*cmd);
3534 
3535 	tlv = ptr;
3536 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3537 	memcpy(tlv->value, tmpl->data, tmpl->len);
3538 
3539 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3540 	if (ret) {
3541 		ath12k_warn(ar->ab,
3542 			    "WMI vdev %i failed to send FILS discovery template command\n",
3543 			    vdev_id);
3544 		dev_kfree_skb(skb);
3545 	}
3546 	return ret;
3547 }
3548 
3549 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3550 			       struct sk_buff *tmpl)
3551 {
3552 	struct wmi_probe_tmpl_cmd *cmd;
3553 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3554 	struct wmi_tlv *tlv;
3555 	struct sk_buff *skb;
3556 	void *ptr;
3557 	int ret, len;
3558 	size_t aligned_len = roundup(tmpl->len, 4);
3559 
3560 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3561 		   "WMI vdev %i set probe response template\n", vdev_id);
3562 
3563 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3564 
3565 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3566 	if (!skb)
3567 		return -ENOMEM;
3568 
3569 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3570 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3571 						 sizeof(*cmd));
3572 	cmd->vdev_id = cpu_to_le32(vdev_id);
3573 	cmd->buf_len = cpu_to_le32(tmpl->len);
3574 
3575 	ptr = skb->data + sizeof(*cmd);
3576 
3577 	probe_info = ptr;
3578 	len = sizeof(*probe_info);
3579 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3580 							len);
3581 	probe_info->caps = 0;
3582 	probe_info->erp = 0;
3583 
3584 	ptr += sizeof(*probe_info);
3585 
3586 	tlv = ptr;
3587 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3588 	memcpy(tlv->value, tmpl->data, tmpl->len);
3589 
3590 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3591 	if (ret) {
3592 		ath12k_warn(ar->ab,
3593 			    "WMI vdev %i failed to send probe response template command\n",
3594 			    vdev_id);
3595 		dev_kfree_skb(skb);
3596 	}
3597 	return ret;
3598 }
3599 
3600 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3601 			      bool unsol_bcast_probe_resp_enabled)
3602 {
3603 	struct sk_buff *skb;
3604 	int ret, len;
3605 	struct wmi_fils_discovery_cmd *cmd;
3606 
3607 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3608 		   "WMI vdev %i set %s interval to %u TU\n",
3609 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3610 		   "unsolicited broadcast probe response" : "FILS discovery",
3611 		   interval);
3612 
3613 	len = sizeof(*cmd);
3614 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3615 	if (!skb)
3616 		return -ENOMEM;
3617 
3618 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3619 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3620 						 len);
3621 	cmd->vdev_id = cpu_to_le32(vdev_id);
3622 	cmd->interval = cpu_to_le32(interval);
3623 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3624 
3625 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3626 	if (ret) {
3627 		ath12k_warn(ar->ab,
3628 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3629 			    vdev_id);
3630 		dev_kfree_skb(skb);
3631 	}
3632 	return ret;
3633 }
3634 
3635 static void
3636 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3637 			      struct ath12k_wmi_pdev_band_arg *arg)
3638 {
3639 	u8 i;
3640 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3641 	struct ath12k_pdev *pdev;
3642 
3643 	for (i = 0; i < soc->num_radios; i++) {
3644 		pdev = &soc->pdevs[i];
3645 		hal_reg_cap = &soc->hal_reg_cap[i];
3646 		arg[i].pdev_id = pdev->pdev_id;
3647 
3648 		switch (pdev->cap.supported_bands) {
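		/* Derive each pdev's tunable frequency range from the bands it supports */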
3649 		case WMI_HOST_WLAN_2G_5G_CAP:
3650 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3651 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3652 			break;
3653 		case WMI_HOST_WLAN_2G_CAP:
3654 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3655 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3656 			break;
3657 		case WMI_HOST_WLAN_5G_CAP:
3658 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3659 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3660 			break;
3661 		default:
3662 			break;
3663 		}
3664 	}
3665 }
3666 
3667 static void
3668 ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
3669 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3670 {
3671 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3672 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3673 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3674 	wmi_cfg->num_offload_reorder_buffs =
3675 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3676 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3677 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3678 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3679 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3680 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3681 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3682 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3683 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3684 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3685 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3686 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3687 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3688 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3689 	wmi_cfg->roam_offload_max_ap_profiles =
3690 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3691 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3692 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3693 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3694 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3695 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3696 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3697 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3698 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3699 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3700 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3701 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3702 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3703 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3704 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3705 	wmi_cfg->num_tdls_conn_table_entries =
3706 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3707 	wmi_cfg->beacon_tx_offload_max_vdev =
3708 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3709 	wmi_cfg->num_multicast_filter_entries =
3710 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3711 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3712 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3713 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3714 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3715 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3716 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3717 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3718 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3719 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3720 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3721 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3722 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3723 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3724 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3725 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
3726 				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
3727 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3728 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3729 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3730 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3731 	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
3732 					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
3733 	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
3734 				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3735 	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
3736 	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
3737 	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
3738 }
3739 
3740 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3741 				struct ath12k_wmi_init_cmd_arg *arg)
3742 {
3743 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3744 	struct sk_buff *skb;
3745 	struct wmi_init_cmd *cmd;
3746 	struct ath12k_wmi_resource_config_params *cfg;
3747 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3748 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3749 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3750 	struct wmi_tlv *tlv;
3751 	int ret, len;
3752 	void *ptr;
3753 	u32 hw_mode_len = 0;
3754 	u16 idx;
3755 
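	/* The hw mode section (set_hw_mode cmd plus a band_to_mac array) is
	 * included only when a specific hw mode is requested.
	 */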
3756 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3757 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3758 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3759 
3760 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3761 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3762 
3763 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3764 	if (!skb)
3765 		return -ENOMEM;
3766 
3767 	cmd = (struct wmi_init_cmd *)skb->data;
3768 
3769 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3770 						 sizeof(*cmd));
3771 
3772 	ptr = skb->data + sizeof(*cmd);
3773 	cfg = ptr;
3774 
3775 	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
3776 
3777 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3778 						 sizeof(*cfg));
3779 
3780 	ptr += sizeof(*cfg);
3781 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3782 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3783 
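	/* Describe each host memory chunk (DMA address, size, request id)
	 * previously allocated on behalf of the firmware.
	 */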
3784 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3785 		host_mem_chunks[idx].tlv_header =
3786 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3787 					   len);
3788 
3789 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3790 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3791 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3792 
3793 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3794 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3795 			   arg->mem_chunks[idx].req_id,
3796 			   (u64)arg->mem_chunks[idx].paddr,
3797 			   arg->mem_chunks[idx].len);
3798 	}
3799 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3800 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3801 
3802 	/* Host memory chunks array TLV header; len is zero when num_mem_chunks is zero */
3803 	tlv = ptr;
3804 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3805 	ptr += TLV_HDR_SIZE + len;
3806 
3807 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3808 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3809 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3810 							     sizeof(*hw_mode));
3811 
3812 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3813 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3814 
3815 		ptr += sizeof(*hw_mode);
3816 
3817 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3818 		tlv = ptr;
3819 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3820 
3821 		ptr += TLV_HDR_SIZE;
3822 		len = sizeof(*band_to_mac);
3823 
3824 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3825 			band_to_mac = (void *)ptr;
3826 
3827 			band_to_mac->tlv_header =
3828 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3829 						       len);
3830 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3831 			band_to_mac->start_freq =
3832 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3833 			band_to_mac->end_freq =
3834 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3835 			ptr += sizeof(*band_to_mac);
3836 		}
3837 	}
3838 
3839 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3840 	if (ret) {
3841 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3842 		dev_kfree_skb(skb);
3843 	}
3844 
3845 	return ret;
3846 }
3847 
3848 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3849 			    int pdev_id)
3850 {
3851 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3852 	struct sk_buff *skb;
3853 	int ret;
3854 
3855 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3856 	if (!skb)
3857 		return -ENOMEM;
3858 
3859 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3860 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3861 						 sizeof(*cmd));
3862 
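	/* Seed the IPv4/IPv6 flow hash keys (th_4/th_6) with random values */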
3863 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3864 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3865 
3866 	cmd->pdev_id = cpu_to_le32(pdev_id);
3867 
3868 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3869 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3870 
3871 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3872 	if (ret) {
3873 		ath12k_warn(ar->ab,
3874 			    "failed to send lro cfg req wmi cmd\n");
3875 		goto err;
3876 	}
3877 
3878 	return 0;
3879 err:
3880 	dev_kfree_skb(skb);
3881 	return ret;
3882 }
3883 
3884 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3885 {
3886 	unsigned long time_left;
3887 
3888 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3889 						WMI_SERVICE_READY_TIMEOUT_HZ);
3890 	if (!time_left)
3891 		return -ETIMEDOUT;
3892 
3893 	return 0;
3894 }
3895 
3896 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3897 {
3898 	unsigned long time_left;
3899 
3900 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3901 						WMI_SERVICE_READY_TIMEOUT_HZ);
3902 	if (!time_left)
3903 		return -ETIMEDOUT;
3904 
3905 	return 0;
3906 }
3907 
3908 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3909 			   enum wmi_host_hw_mode_config_type mode)
3910 {
3911 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3912 	struct sk_buff *skb;
3913 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3914 	int len;
3915 	int ret;
3916 
3917 	len = sizeof(*cmd);
3918 
3919 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3920 	if (!skb)
3921 		return -ENOMEM;
3922 
3923 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3924 
3925 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3926 						 sizeof(*cmd));
3927 
3928 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3929 	cmd->hw_mode_index = cpu_to_le32(mode);
3930 
3931 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3932 	if (ret) {
3933 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3934 		dev_kfree_skb(skb);
3935 	}
3936 
3937 	return ret;
3938 }
3939 
3940 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3941 {
3942 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3943 	struct ath12k_wmi_init_cmd_arg arg = {};
3944 
3945 	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
3946 		     ab->wmi_ab.svc_map))
3947 		arg.res_cfg.is_reg_cc_ext_event_supported = true;
3948 
3949 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
3950 	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
3951 
3952 	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
3953 	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
3954 	arg.mem_chunks = wmi_ab->mem_chunks;
3955 
3956 	if (ab->hw_params->single_pdev_only)
3957 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3958 
3959 	arg.num_band_to_mac = ab->num_radios;
3960 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3961 
3962 	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
3963 
3964 	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
3965 }
3966 
3967 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3968 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3969 {
3970 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3971 	struct sk_buff *skb;
3972 	int ret;
3973 
3974 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3975 	if (!skb)
3976 		return -ENOMEM;
3977 
3978 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3979 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3980 						 sizeof(*cmd));
3981 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3982 	cmd->scan_count = cpu_to_le32(arg->scan_count);
3983 	cmd->scan_period = cpu_to_le32(arg->scan_period);
3984 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3985 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3986 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3987 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3988 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3989 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3990 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3991 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3992 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3993 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3994 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3995 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3996 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3997 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3998 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3999 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
4000 
4001 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4002 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
4003 		   arg->vdev_id);
4004 
4005 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4006 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4007 	if (ret) {
4008 		ath12k_warn(ar->ab,
4009 			    "failed to send spectral scan config wmi cmd\n");
4010 		goto err;
4011 	}
4012 
4013 	return 0;
4014 err:
4015 	dev_kfree_skb(skb);
4016 	return ret;
4017 }
4018 
4019 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
4020 				    u32 trigger, u32 enable)
4021 {
4022 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
4023 	struct sk_buff *skb;
4024 	int ret;
4025 
4026 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4027 	if (!skb)
4028 		return -ENOMEM;
4029 
4030 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
4031 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
4032 						 sizeof(*cmd));
4033 
4034 	cmd->vdev_id = cpu_to_le32(vdev_id);
4035 	cmd->trigger_cmd = cpu_to_le32(trigger);
4036 	cmd->enable_cmd = cpu_to_le32(enable);
4037 
4038 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4039 		   "WMI spectral enable cmd vdev id 0x%x\n",
4040 		   vdev_id);
4041 
4042 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4043 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4044 	if (ret) {
4045 		ath12k_warn(ar->ab,
4046 			    "failed to send spectral enable wmi cmd\n");
4047 		goto err;
4048 	}
4049 
4050 	return 0;
4051 err:
4052 	dev_kfree_skb(skb);
4053 	return ret;
4054 }
4055 
4056 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
4057 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
4058 {
4059 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4060 	struct sk_buff *skb;
4061 	int ret;
4062 
4063 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4064 	if (!skb)
4065 		return -ENOMEM;
4066 
4067 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4068 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
4069 						 sizeof(*cmd));
4070 
4071 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
4072 	cmd->module_id = cpu_to_le32(arg->module_id);
4073 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
4074 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
4075 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
4076 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
4077 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
4078 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
4079 	cmd->num_elems = cpu_to_le32(arg->num_elems);
4080 	cmd->buf_size = cpu_to_le32(arg->buf_size);
4081 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
4082 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
4083 
4084 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4085 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4086 		   arg->pdev_id);
4087 
4088 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
4089 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4090 	if (ret) {
4091 		ath12k_warn(ar->ab,
4092 			    "failed to send dma ring cfg req wmi cmd\n");
4093 		goto err;
4094 	}
4095 
4096 	return 0;
4097 err:
4098 	dev_kfree_skb(skb);
4099 	return ret;
4100 }
4101 
4102 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4103 					  u16 tag, u16 len,
4104 					  const void *ptr, void *data)
4105 {
4106 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4107 
4108 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4109 		return -EPROTO;
4110 
4111 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4112 		return -ENOBUFS;
4113 
4114 	arg->num_buf_entry++;
4115 	return 0;
4116 }
4117 
4118 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4119 					 u16 tag, u16 len,
4120 					 const void *ptr, void *data)
4121 {
4122 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4123 
4124 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4125 		return -EPROTO;
4126 
4127 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4128 		return -ENOBUFS;
4129 
4130 	arg->num_meta++;
4131 
4132 	return 0;
4133 }
4134 
4135 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4136 				    u16 tag, u16 len,
4137 				    const void *ptr, void *data)
4138 {
4139 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
4140 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4141 	u32 pdev_id;
4142 	int ret;
4143 
4144 	switch (tag) {
4145 	case WMI_TAG_DMA_BUF_RELEASE:
4146 		fixed = ptr;
4147 		arg->fixed = *fixed;
4148 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4149 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4150 		break;
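	/* Two ARRAY_STRUCT TLVs follow the fixed part: the buffer release
	 * entries first, then the spectral meta data entries.
	 */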
4151 	case WMI_TAG_ARRAY_STRUCT:
4152 		if (!arg->buf_entry_done) {
4153 			arg->num_buf_entry = 0;
4154 			arg->buf_entry = ptr;
4155 
4156 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4157 						  ath12k_wmi_dma_buf_entry_parse,
4158 						  arg);
4159 			if (ret) {
4160 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4161 					    ret);
4162 				return ret;
4163 			}
4164 
4165 			arg->buf_entry_done = true;
4166 		} else if (!arg->meta_data_done) {
4167 			arg->num_meta = 0;
4168 			arg->meta_data = ptr;
4169 
4170 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4171 						  ath12k_wmi_dma_buf_meta_parse,
4172 						  arg);
4173 			if (ret) {
4174 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4175 					    ret);
4176 				return ret;
4177 			}
4178 
4179 			arg->meta_data_done = true;
4180 		}
4181 		break;
4182 	default:
4183 		break;
4184 	}
4185 	return 0;
4186 }
4187 
4188 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4189 						       struct sk_buff *skb)
4190 {
4191 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4192 	struct ath12k_dbring_buf_release_event param;
4193 	int ret;
4194 
4195 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4196 				  ath12k_wmi_dma_buf_parse,
4197 				  &arg);
4198 	if (ret) {
4199 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4200 		return;
4201 	}
4202 
4203 	param.fixed = arg.fixed;
4204 	param.buf_entry = arg.buf_entry;
4205 	param.num_buf_entry = arg.num_buf_entry;
4206 	param.meta_data = arg.meta_data;
4207 	param.num_meta = arg.num_meta;
4208 
4209 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4210 	if (ret) {
4211 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4212 		return;
4213 	}
4214 }
4215 
4216 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4217 					 u16 tag, u16 len,
4218 					 const void *ptr, void *data)
4219 {
4220 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4221 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4222 	u32 phy_map = 0;
4223 
4224 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4225 		return -EPROTO;
4226 
4227 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
4228 		return -ENOBUFS;
4229 
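	/* The TLV iterator hands over the payload, which starts at the
	 * hw_mode_id field; recover the enclosing cap params struct.
	 */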
4230 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
4231 				   hw_mode_id);
4232 	svc_rdy_ext->n_hw_mode_caps++;
4233 
4234 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
4235 	svc_rdy_ext->tot_phy_id += fls(phy_map);
4236 
4237 	return 0;
4238 }
4239 
4240 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
4241 				   u16 len, const void *ptr, void *data)
4242 {
4243 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4244 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
4245 	enum wmi_host_hw_mode_config_type mode, pref;
4246 	u32 i;
4247 	int ret;
4248 
4249 	svc_rdy_ext->n_hw_mode_caps = 0;
4250 	svc_rdy_ext->hw_mode_caps = ptr;
4251 
4252 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4253 				  ath12k_wmi_hw_mode_caps_parse,
4254 				  svc_rdy_ext);
4255 	if (ret) {
4256 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4257 		return ret;
4258 	}
4259 
4260 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
4261 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4262 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
4263 
4264 		if (mode >= WMI_HOST_HW_MODE_MAX)
4265 			continue;
4266 
4267 		pref = soc->wmi_ab.preferred_hw_mode;
4268 
4269 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
4270 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4271 			soc->wmi_ab.preferred_hw_mode = mode;
4272 		}
4273 	}
4274 
4275 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
4276 		   soc->wmi_ab.preferred_hw_mode);
4277 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4278 		return -EINVAL;
4279 
4280 	return 0;
4281 }
4282 
4283 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
4284 					 u16 tag, u16 len,
4285 					 const void *ptr, void *data)
4286 {
4287 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4288 
4289 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4290 		return -EPROTO;
4291 
4292 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4293 		return -ENOBUFS;
4294 
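	/* Allocate the caps array lazily on the first entry; tot_phy_id was
	 * counted while parsing the hw mode caps.
	 */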
4295 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
4296 	if (!svc_rdy_ext->n_mac_phy_caps) {
4297 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
4298 						    GFP_ATOMIC);
4299 		if (!svc_rdy_ext->mac_phy_caps)
4300 			return -ENOMEM;
4301 	}
4302 
4303 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4304 	svc_rdy_ext->n_mac_phy_caps++;
4305 	return 0;
4306 }
4307 
4308 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
4309 					     u16 tag, u16 len,
4310 					     const void *ptr, void *data)
4311 {
4312 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4313 
4314 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4315 		return -EPROTO;
4316 
4317 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
4318 		return -ENOBUFS;
4319 
4320 	svc_rdy_ext->n_ext_hal_reg_caps++;
4321 	return 0;
4322 }
4323 
4324 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
4325 				       u16 len, const void *ptr, void *data)
4326 {
4327 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4328 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4329 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
4330 	int ret;
4331 	u32 i;
4332 
4333 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
4334 	svc_rdy_ext->ext_hal_reg_caps = ptr;
4335 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
4336 				  ath12k_wmi_ext_hal_reg_caps_parse,
4337 				  svc_rdy_ext);
4338 	if (ret) {
4339 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4340 		return ret;
4341 	}
4342 
4343 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4344 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4345 						      svc_rdy_ext->soc_hal_reg_caps,
4346 						      svc_rdy_ext->ext_hal_reg_caps, i,
4347 						      &reg_cap);
4348 		if (ret) {
4349 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4350 			return ret;
4351 		}
4352 
4353 		if (reg_cap.phy_id >= MAX_RADIOS) {
4354 			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4355 			return -EINVAL;
4356 		}
4357 
4358 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4359 	}
4360 	return 0;
4361 }
4362 
4363 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4364 						 u16 len, const void *ptr,
4365 						 void *data)
4366 {
4367 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4368 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4369 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4370 	u32 phy_id_map;
4371 	int pdev_index = 0;
4372 	int ret;
4373 
4374 	svc_rdy_ext->soc_hal_reg_caps = ptr;
4375 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4376 
4377 	soc->num_radios = 0;
4378 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4379 	soc->fw_pdev_count = 0;
4380 
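	/* Each set bit in phy_id_map is one PHY of the preferred hw mode;
	 * extract its mac/phy caps and account for it as a radio.
	 */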
4381 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4382 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4383 							    svc_rdy_ext,
4384 							    hw_mode_id, soc->num_radios,
4385 							    &soc->pdevs[pdev_index]);
4386 		if (ret) {
4387 			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4388 				    soc->num_radios);
4389 			return ret;
4390 		}
4391 
4392 		soc->num_radios++;
4393 
4394 		/* For single_pdev_only targets,
4395 		 * save mac_phy capability in the same pdev
4396 		 */
4397 		if (soc->hw_params->single_pdev_only)
4398 			pdev_index = 0;
4399 		else
4400 			pdev_index = soc->num_radios;
4401 
4402 		/* TODO: mac_phy_cap prints */
4403 		phy_id_map >>= 1;
4404 	}
4405 
4406 	if (soc->hw_params->single_pdev_only) {
4407 		soc->num_radios = 1;
4408 		soc->pdevs[0].pdev_id = 0;
4409 	}
4410 
4411 	return 0;
4412 }
4413 
4414 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4415 					  u16 tag, u16 len,
4416 					  const void *ptr, void *data)
4417 {
4418 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4419 
4420 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4421 		return -EPROTO;
4422 
4423 	parse->n_dma_ring_caps++;
4424 	return 0;
4425 }
4426 
4427 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4428 					u32 num_cap)
4429 {
4430 	size_t sz;
4431 	void *ptr;
4432 
4433 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
4434 	ptr = kzalloc(sz, GFP_ATOMIC);
4435 	if (!ptr)
4436 		return -ENOMEM;
4437 
4438 	ab->db_caps = ptr;
4439 	ab->num_db_cap = num_cap;
4440 
4441 	return 0;
4442 }
4443 
4444 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4445 {
4446 	kfree(ab->db_caps);
4447 	ab->db_caps = NULL;
4448 	ab->num_db_cap = 0;
4449 }
4450 
4451 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4452 				    u16 len, const void *ptr, void *data)
4453 {
4454 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4455 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4456 	struct ath12k_dbring_cap *dir_buff_caps;
4457 	int ret;
4458 	u32 i;
4459 
4460 	dma_caps_parse->n_dma_ring_caps = 0;
4461 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4462 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4463 				  ath12k_wmi_dma_ring_caps_parse,
4464 				  dma_caps_parse);
4465 	if (ret) {
4466 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4467 		return ret;
4468 	}
4469 
4470 	if (!dma_caps_parse->n_dma_ring_caps)
4471 		return 0;
4472 
4473 	if (ab->num_db_cap) {
4474 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4475 		return 0;
4476 	}
4477 
4478 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4479 	if (ret)
4480 		return ret;
4481 
4482 	dir_buff_caps = ab->db_caps;
4483 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4484 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4485 			ath12k_warn(ab, "Invalid module id %d\n",
4486 				    le32_to_cpu(dma_caps[i].module_id));
4487 			ret = -EINVAL;
4488 			goto free_dir_buff;
4489 		}
4490 
4491 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4492 		dir_buff_caps[i].pdev_id =
4493 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4494 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4495 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4496 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4497 	}
4498 
4499 	return 0;
4500 
4501 free_dir_buff:
4502 	ath12k_wmi_free_dbring_caps(ab);
4503 	return ret;
4504 }
4505 
4506 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4507 					u16 tag, u16 len,
4508 					const void *ptr, void *data)
4509 {
4510 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4511 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4512 	int ret;
4513 
4514 	switch (tag) {
4515 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
4516 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4517 						&svc_rdy_ext->arg);
4518 		if (ret) {
4519 			ath12k_warn(ab, "unable to extract ext params\n");
4520 			return ret;
4521 		}
4522 		break;
4523 
4524 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4525 		svc_rdy_ext->hw_caps = ptr;
4526 		svc_rdy_ext->arg.num_hw_modes =
4527 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4528 		break;
4529 
4530 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4531 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4532 							    svc_rdy_ext);
4533 		if (ret)
4534 			return ret;
4535 		break;
4536 
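	/* Several back-to-back ARRAY_STRUCT TLVs follow; the *_done flags
	 * track which array is currently being parsed.
	 */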
4537 	case WMI_TAG_ARRAY_STRUCT:
4538 		if (!svc_rdy_ext->hw_mode_done) {
4539 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4540 			if (ret)
4541 				return ret;
4542 
4543 			svc_rdy_ext->hw_mode_done = true;
4544 		} else if (!svc_rdy_ext->mac_phy_done) {
4545 			svc_rdy_ext->n_mac_phy_caps = 0;
4546 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4547 						  ath12k_wmi_mac_phy_caps_parse,
4548 						  svc_rdy_ext);
4549 			if (ret) {
4550 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4551 				return ret;
4552 			}
4553 
4554 			svc_rdy_ext->mac_phy_done = true;
4555 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
4556 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4557 			if (ret)
4558 				return ret;
4559 
4560 			svc_rdy_ext->ext_hal_reg_done = true;
4561 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4562 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4563 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4564 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4565 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4566 			svc_rdy_ext->oem_dma_ring_cap_done = true;
4567 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
4568 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4569 						       &svc_rdy_ext->dma_caps_parse);
4570 			if (ret)
4571 				return ret;
4572 
4573 			svc_rdy_ext->dma_ring_cap_done = true;
4574 		}
4575 		break;
4576 
4577 	default:
4578 		break;
4579 	}
4580 	return 0;
4581 }
4582 
4583 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4584 					  struct sk_buff *skb)
4585 {
4586 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4587 	int ret;
4588 
4589 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4590 				  ath12k_wmi_svc_rdy_ext_parse,
4591 				  &svc_rdy_ext);
4592 	if (ret) {
4593 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4594 		goto err;
4595 	}
4596 
4597 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4598 		complete(&ab->wmi_ab.service_ready);
4599 
4600 	kfree(svc_rdy_ext.mac_phy_caps);
4601 	return 0;
4602 
4603 err:
	kfree(svc_rdy_ext.mac_phy_caps);
4604 	ath12k_wmi_free_dbring_caps(ab);
4605 	return ret;
4606 }
4607 
4608 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4609 				      const void *ptr,
4610 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4611 {
4612 	const struct wmi_service_ready_ext2_event *ev = ptr;
4613 
4614 	if (!ev)
4615 		return -EINVAL;
4616 
4617 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4618 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4619 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4620 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4621 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4622 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4623 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4624 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4625 	return 0;
4626 }
4627 
4628 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4629 				      const __le32 cap_mac_info[],
4630 				      const __le32 cap_phy_info[],
4631 				      const __le32 supp_mcs[],
4632 				      const struct ath12k_wmi_ppe_threshold_params *ppet,
4633 				      __le32 cap_info_internal)
4634 {
4635 	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4636 	u32 support_320mhz;
4637 	u8 i;
4638 
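	/* The 6 GHz 320 MHz PHY capability bit may already have been set
	 * (e.g. by an earlier MAC_PHY_CAPABILITIES_EXT TLV for the single
	 * hw mode); save it so the eht_cap_phy_info copy below does not
	 * clobber it, and OR it back in afterwards.
	 */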
4639 	if (band == NL80211_BAND_6GHZ)
4640 		support_320mhz = cap_band->eht_cap_phy_info[0] &
4641 					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4642 
4643 	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4644 		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4645 
4646 	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4647 		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4648 
4649 	if (band == NL80211_BAND_6GHZ)
4650 		cap_band->eht_cap_phy_info[0] |= support_320mhz;
4651 
4652 	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4653 	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4654 	if (band != NL80211_BAND_2GHZ) {
4655 		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4656 		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4657 	}
4658 
4659 	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4660 	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4661 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
4662 		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4663 			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4664 
4665 	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4666 }
4667 
4668 static int
4669 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4670 				      const struct ath12k_wmi_caps_ext_params *caps,
4671 				      struct ath12k_pdev *pdev)
4672 {
4673 	struct ath12k_band_cap *cap_band;
4674 	u32 bands, support_320mhz;
4675 	int i;
4676 
4677 	if (ab->hw_params->single_pdev_only) {
4678 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4679 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4680 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4681 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4682 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4683 			return 0;
4684 		}
4685 
4686 		for (i = 0; i < ab->fw_pdev_count; i++) {
4687 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4688 
4689 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4690 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4691 				bands = fw_pdev->supported_bands;
4692 				break;
4693 			}
4694 		}
4695 
4696 		if (i == ab->fw_pdev_count)
4697 			return -EINVAL;
4698 	} else {
4699 		bands = pdev->cap.supported_bands;
4700 	}
4701 
4702 	if (bands & WMI_HOST_WLAN_2G_CAP) {
4703 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4704 					  caps->eht_cap_mac_info_2ghz,
4705 					  caps->eht_cap_phy_info_2ghz,
4706 					  caps->eht_supp_mcs_ext_2ghz,
4707 					  &caps->eht_ppet_2ghz,
4708 					  caps->eht_cap_info_internal);
4709 	}
4710 
4711 	if (bands & WMI_HOST_WLAN_5G_CAP) {
4712 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4713 					  caps->eht_cap_mac_info_5ghz,
4714 					  caps->eht_cap_phy_info_5ghz,
4715 					  caps->eht_supp_mcs_ext_5ghz,
4716 					  &caps->eht_ppet_5ghz,
4717 					  caps->eht_cap_info_internal);
4718 
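		/* The caps_ext TLV provides only 2 GHz and 5 GHz EHT
		 * parameter sets; the 5 GHz set is reused for the 6 GHz
		 * band (the 320 MHz bit is handled separately in
		 * ath12k_wmi_eht_caps_parse()).
		 */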
4719 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4720 					  caps->eht_cap_mac_info_5ghz,
4721 					  caps->eht_cap_phy_info_5ghz,
4722 					  caps->eht_supp_mcs_ext_5ghz,
4723 					  &caps->eht_ppet_5ghz,
4724 					  caps->eht_cap_info_internal);
4725 	}
4726 
4727 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
4728 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
4729 
4730 	return 0;
4731 }
4732 
4733 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4734 					   u16 len, const void *ptr,
4735 					   void *data)
4736 {
4737 	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4738 	int i = 0, ret;
4739 
4740 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4741 		return -EPROTO;
4742 
4743 	if (ab->hw_params->single_pdev_only) {
4744 		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4745 		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4746 			return 0;
4747 	} else {
4748 		for (i = 0; i < ab->num_radios; i++) {
4749 			if (ab->pdevs[i].pdev_id ==
4750 			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4751 				break;
4752 		}
4753 
4754 		if (i == ab->num_radios)
4755 			return -EINVAL;
4756 	}
4757 
4758 	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4759 	if (ret) {
4760 		ath12k_warn(ab,
4761 			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4762 			    ab->pdevs[i].pdev_id, ret);
4763 		return ret;
4764 	}
4765 
4766 	return 0;
4767 }
4768 
4769 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4770 					 u16 tag, u16 len,
4771 					 const void *ptr, void *data)
4772 {
4773 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4774 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4775 	int ret;
4776 
4777 	switch (tag) {
4778 	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4779 		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4780 						 &parse->arg);
4781 		if (ret) {
4782 			ath12k_warn(ab,
4783 				    "failed to extract wmi service ready ext2 parameters: %d\n",
4784 				    ret);
4785 			return ret;
4786 		}
4787 		break;
4788 
4789 	case WMI_TAG_ARRAY_STRUCT:
4790 		if (!parse->dma_ring_cap_done) {
4791 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4792 						       &parse->dma_caps_parse);
4793 			if (ret)
4794 				return ret;
4795 
4796 			parse->dma_ring_cap_done = true;
4797 		} else if (!parse->spectral_bin_scaling_done) {
4798 			/* TODO: place-holder; the spectral bin scaling
4799 			 * array precedes WMI_TAG_MAC_PHY_CAPABILITIES_EXT
4800 			 * and is not parsed yet.
4801 			 */
4802 			parse->spectral_bin_scaling_done = true;
4803 		} else if (!parse->mac_phy_caps_ext_done) {
4804 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4805 						  ath12k_wmi_tlv_mac_phy_caps_ext,
4806 						  parse);
4807 			if (ret) {
4808 				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4809 					    ret);
4810 				return ret;
4811 			}
4812 
4813 			parse->mac_phy_caps_ext_done = true;
4814 		}
4815 		break;
4816 	default:
4817 		break;
4818 	}
4819 
4820 	return 0;
4821 }
4822 
4823 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4824 					   struct sk_buff *skb)
4825 {
4826 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4827 	int ret;
4828 
4829 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4830 				  ath12k_wmi_svc_rdy_ext2_parse,
4831 				  &svc_rdy_ext2);
4832 	if (ret) {
4833 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4834 		goto err;
4835 	}
4836 
4837 	complete(&ab->wmi_ab.service_ready);
4838 
4839 	return 0;
4840 
4841 err:
4842 	ath12k_wmi_free_dbring_caps(ab);
4843 	return ret;
4844 }
4845 
4846 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4847 					   struct wmi_vdev_start_resp_event *vdev_rsp)
4848 {
4849 	const void **tb;
4850 	const struct wmi_vdev_start_resp_event *ev;
4851 	int ret;
4852 
4853 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4854 	if (IS_ERR(tb)) {
4855 		ret = PTR_ERR(tb);
4856 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4857 		return ret;
4858 	}
4859 
4860 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4861 	if (!ev) {
4862 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
4863 		kfree(tb);
4864 		return -EPROTO;
4865 	}
4866 
4867 	*vdev_rsp = *ev;
4868 
4869 	kfree(tb);
4870 	return 0;
4871 }
4872 
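/* Convert an array of firmware ext reg rules into the driver's
 * struct ath12k_reg_rule representation. Each WMI rule packs its
 * fields into __le32 words, extracted here with le32_get_bits() and
 * the REG_RULE_* field masks. GFP_ATOMIC matches the atomic WMI
 * event-processing context this is called from.
 */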
4873 static struct ath12k_reg_rule
4874 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4875 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4876 {
4877 	struct ath12k_reg_rule *reg_rule_ptr;
4878 	u32 count;
4879 
4880 	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
4881 			       GFP_ATOMIC);
4882 
4883 	if (!reg_rule_ptr)
4884 		return NULL;
4885 
4886 	for (count = 0; count < num_reg_rules; count++) {
4887 		reg_rule_ptr[count].start_freq =
4888 			le32_get_bits(wmi_reg_rule[count].freq_info,
4889 				      REG_RULE_START_FREQ);
4890 		reg_rule_ptr[count].end_freq =
4891 			le32_get_bits(wmi_reg_rule[count].freq_info,
4892 				      REG_RULE_END_FREQ);
4893 		reg_rule_ptr[count].max_bw =
4894 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4895 				      REG_RULE_MAX_BW);
4896 		reg_rule_ptr[count].reg_power =
4897 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4898 				      REG_RULE_REG_PWR);
4899 		reg_rule_ptr[count].ant_gain =
4900 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4901 				      REG_RULE_ANT_GAIN);
4902 		reg_rule_ptr[count].flags =
4903 			le32_get_bits(wmi_reg_rule[count].flag_info,
4904 				      REG_RULE_FLAGS);
4905 		reg_rule_ptr[count].psd_flag =
4906 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4907 				      REG_RULE_PSD_INFO);
4908 		reg_rule_ptr[count].psd_eirp =
4909 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4910 				      REG_RULE_PSD_EIRP);
4911 	}
4912 
4913 	return reg_rule_ptr;
4914 }
4915 
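/* Count 5 GHz rules whose start frequency actually lies in the 6 GHz
 * range; see the comment in ath12k_pull_reg_chan_list_ext_update_ev()
 * for why such rules are dropped.
 */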
4916 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
4917 					    u32 num_reg_rules)
4918 {
4919 	u8 num_invalid_5ghz_rules = 0;
4920 	u32 count, start_freq;
4921 
4922 	for (count = 0; count < num_reg_rules; count++) {
4923 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
4924 
4925 		if (start_freq >= ATH12K_MIN_6G_FREQ)
4926 			num_invalid_5ghz_rules++;
4927 	}
4928 
4929 	return num_invalid_5ghz_rules;
4930 }
4931 
4932 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4933 						   struct sk_buff *skb,
4934 						   struct ath12k_reg_info *reg_info)
4935 {
4936 	const void **tb;
4937 	const struct wmi_reg_chan_list_cc_ext_event *ev;
4938 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4939 	u32 num_2g_reg_rules, num_5g_reg_rules;
4940 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4941 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4942 	u8 num_invalid_5ghz_ext_rules;
4943 	u32 total_reg_rules = 0;
4944 	int ret, i, j;
4945 
4946 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4947 
4948 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4949 	if (IS_ERR(tb)) {
4950 		ret = PTR_ERR(tb);
4951 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4952 		return ret;
4953 	}
4954 
4955 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4956 	if (!ev) {
4957 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4958 		kfree(tb);
4959 		return -EPROTO;
4960 	}
4961 
4962 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4963 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4964 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4965 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4966 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4967 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4968 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4969 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4970 
4971 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4972 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4973 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4974 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4975 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4976 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4977 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4978 	}
4979 
4980 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
4981 	total_reg_rules += num_2g_reg_rules;
4982 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
4983 	total_reg_rules += num_5g_reg_rules;
4984 
4985 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4986 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4987 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4988 		kfree(tb);
4989 		return -EINVAL;
4990 	}
4991 
4992 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4993 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4994 
4995 		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4996 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4997 				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4998 			kfree(tb);
4999 			return -EINVAL;
5000 		}
5001 
5002 		total_reg_rules += num_6g_reg_rules_ap[i];
5003 	}
5004 
5005 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5006 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5007 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5008 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
5009 
5010 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5011 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5012 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
5013 
5014 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5015 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5016 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
5017 
5018 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
5019 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
5020 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6G_REG_RULES) {
5021 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
5022 				    i);
5023 			kfree(tb);
5024 			return -EINVAL;
5025 		}
5026 	}
5027 
5028 	if (!total_reg_rules) {
5029 		ath12k_warn(ab, "No reg rules available\n");
5030 		kfree(tb);
5031 		return -EINVAL;
5032 	}
5033 
5034 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
5035 
5036 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
5037 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
5038 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
5039 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
5040 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
5041 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
5042 
5043 	switch (le32_to_cpu(ev->status_code)) {
5044 	case WMI_REG_SET_CC_STATUS_PASS:
5045 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
5046 		break;
5047 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
5048 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
5049 		break;
5050 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
5051 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
5052 		break;
5053 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
5054 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
5055 		break;
5056 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
5057 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
5058 		break;
5059 	case WMI_REG_SET_CC_STATUS_FAIL:
5060 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
5061 		break;
5062 	}
5063 
5064 	reg_info->is_ext_reg_event = true;
5065 
5066 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
5067 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
5068 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
5069 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
5070 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
5071 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
5072 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
5073 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
5074 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
5075 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
5076 
5077 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5078 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5079 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
5080 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
5081 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
5082 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5083 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
5084 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
5085 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
5086 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
5087 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
5088 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
5089 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
5090 	}
5091 
5092 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5093 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
5094 		   __func__, reg_info->alpha2, reg_info->dfs_region,
5095 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
5096 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
5097 		   reg_info->phybitmap);
5098 
5099 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5100 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
5101 		   num_2g_reg_rules, num_5g_reg_rules);
5102 
5103 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5104 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5105 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5106 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5107 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5108 
5109 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5110 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5111 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5112 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5113 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5114 
5115 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5116 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5117 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5118 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5119 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5120 
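	/* The reg rules follow the fixed event struct (plus its TLV header)
	 * as one flat array, in this order: 2 GHz rules, 5 GHz rules
	 * (possibly including the invalid 6 GHz entries handled below),
	 * 6 GHz AP rules per power type, then 6 GHz client rules per
	 * (AP type, client type) pair. The pointer is advanced past each
	 * group as it is converted.
	 */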
5121 	ext_wmi_reg_rule =
5122 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5123 			+ sizeof(*ev)
5124 			+ sizeof(struct wmi_tlv));
5125 
5126 	if (num_2g_reg_rules) {
5127 		reg_info->reg_rules_2g_ptr =
5128 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5129 						      ext_wmi_reg_rule);
5130 
5131 		if (!reg_info->reg_rules_2g_ptr) {
5132 			kfree(tb);
5133 			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
5134 			return -ENOMEM;
5135 		}
5136 	}
5137 
5138 	ext_wmi_reg_rule += num_2g_reg_rules;
5139 
5140 	/* For a few countries the firmware includes 6 GHz reg rules
5141 	 * in the 5 GHz rule list in addition to the separate 6 GHz
5142 	 * rules. Having the same 6 GHz rule in both lists makes the
5143 	 * intersect check true, so the same rules show up multiple
5144 	 * times in iw output. Hence, skip any 6 GHz rules found in
5145 	 * the 5 GHz reg rule list.
5146 	 */
5147 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
5148 								       num_5g_reg_rules);
5149 
5150 	if (num_invalid_5ghz_ext_rules) {
5151 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5152 			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
5153 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
5154 			   num_invalid_5ghz_ext_rules);
5155 
5156 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
5157 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
5158 	}
5159 
5160 	if (num_5g_reg_rules) {
5161 		reg_info->reg_rules_5g_ptr =
5162 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
5163 						      ext_wmi_reg_rule);
5164 
5165 		if (!reg_info->reg_rules_5g_ptr) {
5166 			kfree(tb);
5167 			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
5168 			return -ENOMEM;
5169 		}
5170 	}
5171 
5172 	/* The number of 5 GHz reg rules was reduced above, but the pointer
5173 	 * must still be advanced past all rules the firmware sent,
5174 	 * including the invalid ones.
5175 	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
5176 	 */
5177 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
5178 
5179 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
5180 		reg_info->reg_rules_6g_ap_ptr[i] =
5181 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
5182 						      ext_wmi_reg_rule);
5183 
5184 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
5185 			kfree(tb);
5186 			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
5187 			return -ENOMEM;
5188 		}
5189 
5190 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
5191 	}
5192 
5193 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
5194 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5195 			reg_info->reg_rules_6g_client_ptr[j][i] =
5196 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
5197 							      ext_wmi_reg_rule);
5198 
5199 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
5200 				kfree(tb);
5201 				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
5202 				return -ENOMEM;
5203 			}
5204 
5205 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
5206 		}
5207 	}
5208 
5209 	reg_info->client_type = le32_to_cpu(ev->client_type);
5210 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
5211 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
5212 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
5213 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
5214 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
5215 		le32_to_cpu(ev->domain_code_6g_ap_sp);
5216 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
5217 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
5218 
5219 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5220 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
5221 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
5222 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
5223 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
5224 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
5225 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
5226 	}
5227 
5228 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
5229 
5230 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
5231 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
5232 
5233 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
5234 
5235 	kfree(tb);
5236 	return 0;
5237 }
5238 
5239 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5240 					struct wmi_peer_delete_resp_event *peer_del_resp)
5241 {
5242 	const void **tb;
5243 	const struct wmi_peer_delete_resp_event *ev;
5244 	int ret;
5245 
5246 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5247 	if (IS_ERR(tb)) {
5248 		ret = PTR_ERR(tb);
5249 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5250 		return ret;
5251 	}
5252 
5253 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5254 	if (!ev) {
5255 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
5256 		kfree(tb);
5257 		return -EPROTO;
5258 	}
5259 
5260 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5261 
5262 	peer_del_resp->vdev_id = ev->vdev_id;
5263 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5264 			ev->peer_macaddr.addr);
5265 
5266 	kfree(tb);
5267 	return 0;
5268 }
5269 
5270 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5271 					struct sk_buff *skb,
5272 					u32 *vdev_id)
5273 {
5274 	const void **tb;
5275 	const struct wmi_vdev_delete_resp_event *ev;
5276 	int ret;
5277 
5278 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5279 	if (IS_ERR(tb)) {
5280 		ret = PTR_ERR(tb);
5281 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5282 		return ret;
5283 	}
5284 
5285 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5286 	if (!ev) {
5287 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5288 		kfree(tb);
5289 		return -EPROTO;
5290 	}
5291 
5292 	*vdev_id = le32_to_cpu(ev->vdev_id);
5293 
5294 	kfree(tb);
5295 	return 0;
5296 }
5297 
5298 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5299 					struct sk_buff *skb,
5300 					u32 *vdev_id, u32 *tx_status)
5301 {
5302 	const void **tb;
5303 	const struct wmi_bcn_tx_status_event *ev;
5304 	int ret;
5305 
5306 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5307 	if (IS_ERR(tb)) {
5308 		ret = PTR_ERR(tb);
5309 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5310 		return ret;
5311 	}
5312 
5313 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5314 	if (!ev) {
5315 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
5316 		kfree(tb);
5317 		return -EPROTO;
5318 	}
5319 
5320 	*vdev_id = le32_to_cpu(ev->vdev_id);
5321 	*tx_status = le32_to_cpu(ev->tx_status);
5322 
5323 	kfree(tb);
5324 	return 0;
5325 }
5326 
5327 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5328 					      u32 *vdev_id)
5329 {
5330 	const void **tb;
5331 	const struct wmi_vdev_stopped_event *ev;
5332 	int ret;
5333 
5334 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5335 	if (IS_ERR(tb)) {
5336 		ret = PTR_ERR(tb);
5337 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5338 		return ret;
5339 	}
5340 
5341 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5342 	if (!ev) {
5343 		ath12k_warn(ab, "failed to fetch vdev stop ev");
5344 		kfree(tb);
5345 		return -EPROTO;
5346 	}
5347 
5348 	*vdev_id = le32_to_cpu(ev->vdev_id);
5349 
5350 	kfree(tb);
5351 	return 0;
5352 }
5353 
5354 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
5355 					u16 tag, u16 len,
5356 					const void *ptr, void *data)
5357 {
5358 	struct wmi_tlv_mgmt_rx_parse *parse = data;
5359 
5360 	switch (tag) {
5361 	case WMI_TAG_MGMT_RX_HDR:
5362 		parse->fixed = ptr;
5363 		break;
5364 	case WMI_TAG_ARRAY_BYTE:
5365 		if (!parse->frame_buf_done) {
5366 			parse->frame_buf = ptr;
5367 			parse->frame_buf_done = true;
5368 		}
5369 		break;
5370 	}
5371 	return 0;
5372 }
5373 
5374 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
5375 					  struct sk_buff *skb,
5376 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
5377 {
5378 	struct wmi_tlv_mgmt_rx_parse parse = { };
5379 	const struct ath12k_wmi_mgmt_rx_params *ev;
5380 	const u8 *frame;
5381 	int i, ret;
5382 
5383 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5384 				  ath12k_wmi_tlv_mgmt_rx_parse,
5385 				  &parse);
5386 	if (ret) {
5387 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
5388 		return ret;
5389 	}
5390 
5391 	ev = parse.fixed;
5392 	frame = parse.frame_buf;
5393 
5394 	if (!ev || !frame) {
5395 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
5396 		return -EPROTO;
5397 	}
5398 
5399 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
5400 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
5401 	hdr->channel = le32_to_cpu(ev->channel);
5402 	hdr->snr = le32_to_cpu(ev->snr);
5403 	hdr->rate = le32_to_cpu(ev->rate);
5404 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
5405 	hdr->buf_len = le32_to_cpu(ev->buf_len);
5406 	hdr->status = le32_to_cpu(ev->status);
5407 	hdr->flags = le32_to_cpu(ev->flags);
5408 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
5409 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
5410 
5411 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
5412 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
5413 
5414 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
5415 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
5416 		return -EPROTO;
5417 	}
5418 
5419 	/* shift the sk_buff to point to `frame` */
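	/* trim to zero length, extend to the frame offset, pull that
	 * headroom off and extend again by buf_len: the skb now covers
	 * exactly the received 802.11 management frame.
	 */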
5420 	skb_trim(skb, 0);
5421 	skb_put(skb, frame - skb->data);
5422 	skb_pull(skb, frame - skb->data);
5423 	skb_put(skb, hdr->buf_len);
5424 
5425 	return 0;
5426 }
5427 
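/* Complete a management frame transmission: desc_id is the idr key
 * allocated when the frame was queued for transmission, used here to
 * look up the original skb in ar->txmgmt_idr, unmap its DMA buffer and
 * report the tx status back to mac80211.
 */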
5428 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
5429 				    u32 status)
5430 {
5431 	struct sk_buff *msdu;
5432 	struct ieee80211_tx_info *info;
5433 	struct ath12k_skb_cb *skb_cb;
5434 	int num_mgmt;
5435 
5436 	spin_lock_bh(&ar->txmgmt_idr_lock);
5437 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
5438 
5439 	if (!msdu) {
5440 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5441 			    desc_id);
5442 		spin_unlock_bh(&ar->txmgmt_idr_lock);
5443 		return -ENOENT;
5444 	}
5445 
5446 	idr_remove(&ar->txmgmt_idr, desc_id);
5447 	spin_unlock_bh(&ar->txmgmt_idr_lock);
5448 
5449 	skb_cb = ATH12K_SKB_CB(msdu);
5450 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5451 
5452 	info = IEEE80211_SKB_CB(msdu);
5453 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5454 		info->flags |= IEEE80211_TX_STAT_ACK;
5455 
5456 	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
5457 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
5458 
5459 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
5460 
5461 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5462 
5463 	/* WARN if we receive this event without any pending mgmt tx */
5464 	WARN_ON_ONCE(num_mgmt < 0);
5466 
5467 	if (!num_mgmt)
5468 		wake_up(&ar->txmgmt_empty_waitq);
5469 
5470 	return 0;
5471 }
5472 
5473 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5474 					       struct sk_buff *skb,
5475 					       struct wmi_mgmt_tx_compl_event *param)
5476 {
5477 	const void **tb;
5478 	const struct wmi_mgmt_tx_compl_event *ev;
5479 	int ret;
5480 
5481 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5482 	if (IS_ERR(tb)) {
5483 		ret = PTR_ERR(tb);
5484 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5485 		return ret;
5486 	}
5487 
5488 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5489 	if (!ev) {
5490 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5491 		kfree(tb);
5492 		return -EPROTO;
5493 	}
5494 
5495 	param->pdev_id = ev->pdev_id;
5496 	param->desc_id = ev->desc_id;
5497 	param->status = ev->status;
5498 
5499 	kfree(tb);
5500 	return 0;
5501 }
5502 
5503 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5504 {
5505 	lockdep_assert_held(&ar->data_lock);
5506 
5507 	switch (ar->scan.state) {
5508 	case ATH12K_SCAN_IDLE:
5509 	case ATH12K_SCAN_RUNNING:
5510 	case ATH12K_SCAN_ABORTING:
5511 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5512 			    ath12k_scan_state_str(ar->scan.state),
5513 			    ar->scan.state);
5514 		break;
5515 	case ATH12K_SCAN_STARTING:
5516 		ar->scan.state = ATH12K_SCAN_RUNNING;
5517 
5518 		if (ar->scan.is_roc)
5519 			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5520 
5521 		complete(&ar->scan.started);
5522 		break;
5523 	}
5524 }
5525 
5526 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5527 {
5528 	lockdep_assert_held(&ar->data_lock);
5529 
5530 	switch (ar->scan.state) {
5531 	case ATH12K_SCAN_IDLE:
5532 	case ATH12K_SCAN_RUNNING:
5533 	case ATH12K_SCAN_ABORTING:
5534 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5535 			    ath12k_scan_state_str(ar->scan.state),
5536 			    ar->scan.state);
5537 		break;
5538 	case ATH12K_SCAN_STARTING:
5539 		complete(&ar->scan.started);
5540 		__ath12k_mac_scan_finish(ar);
5541 		break;
5542 	}
5543 }
5544 
5545 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5546 {
5547 	lockdep_assert_held(&ar->data_lock);
5548 
5549 	switch (ar->scan.state) {
5550 	case ATH12K_SCAN_IDLE:
5551 	case ATH12K_SCAN_STARTING:
5552 		/* One suspected reason scan can be completed while starting is
5553 		 * if firmware fails to deliver all scan events to the host,
5554 		 * e.g. when transport pipe is full. This has been observed
5555 		 * with spectral scan phyerr events starving wmi transport
5556 		 * pipe. In such a case the "scan completed" event should be (and
5557 		 * is) ignored by the host as it may be just firmware's scan
5558 		 * state machine recovering.
5559 		 */
5560 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5561 			    ath12k_scan_state_str(ar->scan.state),
5562 			    ar->scan.state);
5563 		break;
5564 	case ATH12K_SCAN_RUNNING:
5565 	case ATH12K_SCAN_ABORTING:
5566 		__ath12k_mac_scan_finish(ar);
5567 		break;
5568 	}
5569 }
5570 
5571 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5572 {
5573 	lockdep_assert_held(&ar->data_lock);
5574 
5575 	switch (ar->scan.state) {
5576 	case ATH12K_SCAN_IDLE:
5577 	case ATH12K_SCAN_STARTING:
5578 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5579 			    ath12k_scan_state_str(ar->scan.state),
5580 			    ar->scan.state);
5581 		break;
5582 	case ATH12K_SCAN_RUNNING:
5583 	case ATH12K_SCAN_ABORTING:
5584 		ar->scan_channel = NULL;
5585 		break;
5586 	}
5587 }
5588 
5589 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5590 {
5591 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5592 
5593 	lockdep_assert_held(&ar->data_lock);
5594 
5595 	switch (ar->scan.state) {
5596 	case ATH12K_SCAN_IDLE:
5597 	case ATH12K_SCAN_STARTING:
5598 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5599 			    ath12k_scan_state_str(ar->scan.state),
5600 			    ar->scan.state);
5601 		break;
5602 	case ATH12K_SCAN_RUNNING:
5603 	case ATH12K_SCAN_ABORTING:
5604 		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5605 
5606 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5607 			complete(&ar->scan.on_channel);
5608 
5609 		break;
5610 	}
5611 }
5612 
5613 static const char *
5614 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5615 			       enum wmi_scan_completion_reason reason)
5616 {
5617 	switch (type) {
5618 	case WMI_SCAN_EVENT_STARTED:
5619 		return "started";
5620 	case WMI_SCAN_EVENT_COMPLETED:
5621 		switch (reason) {
5622 		case WMI_SCAN_REASON_COMPLETED:
5623 			return "completed";
5624 		case WMI_SCAN_REASON_CANCELLED:
5625 			return "completed [cancelled]";
5626 		case WMI_SCAN_REASON_PREEMPTED:
5627 			return "completed [preempted]";
5628 		case WMI_SCAN_REASON_TIMEDOUT:
5629 			return "completed [timedout]";
5630 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5631 			return "completed [internal err]";
5632 		case WMI_SCAN_REASON_MAX:
5633 			break;
5634 		}
5635 		return "completed [unknown]";
5636 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5637 		return "bss channel";
5638 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5639 		return "foreign channel";
5640 	case WMI_SCAN_EVENT_DEQUEUED:
5641 		return "dequeued";
5642 	case WMI_SCAN_EVENT_PREEMPTED:
5643 		return "preempted";
5644 	case WMI_SCAN_EVENT_START_FAILED:
5645 		return "start failed";
5646 	case WMI_SCAN_EVENT_RESTARTED:
5647 		return "restarted";
5648 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5649 		return "foreign channel exit";
5650 	default:
5651 		return "unknown";
5652 	}
5653 }
5654 
5655 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5656 			       struct wmi_scan_event *scan_evt_param)
5657 {
5658 	const void **tb;
5659 	const struct wmi_scan_event *ev;
5660 	int ret;
5661 
5662 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5663 	if (IS_ERR(tb)) {
5664 		ret = PTR_ERR(tb);
5665 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5666 		return ret;
5667 	}
5668 
5669 	ev = tb[WMI_TAG_SCAN_EVENT];
5670 	if (!ev) {
5671 		ath12k_warn(ab, "failed to fetch scan ev");
5672 		kfree(tb);
5673 		return -EPROTO;
5674 	}
5675 
5676 	scan_evt_param->event_type = ev->event_type;
5677 	scan_evt_param->reason = ev->reason;
5678 	scan_evt_param->channel_freq = ev->channel_freq;
5679 	scan_evt_param->scan_req_id = ev->scan_req_id;
5680 	scan_evt_param->scan_id = ev->scan_id;
5681 	scan_evt_param->vdev_id = ev->vdev_id;
5682 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5683 
5684 	kfree(tb);
5685 	return 0;
5686 }
5687 
5688 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5689 					   struct wmi_peer_sta_kickout_arg *arg)
5690 {
5691 	const void **tb;
5692 	const struct wmi_peer_sta_kickout_event *ev;
5693 	int ret;
5694 
5695 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5696 	if (IS_ERR(tb)) {
5697 		ret = PTR_ERR(tb);
5698 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5699 		return ret;
5700 	}
5701 
5702 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5703 	if (!ev) {
5704 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5705 		kfree(tb);
5706 		return -EPROTO;
5707 	}
5708 
5709 	arg->mac_addr = ev->peer_macaddr.addr;
5710 
5711 	kfree(tb);
5712 	return 0;
5713 }
5714 
5715 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5716 			       struct wmi_roam_event *roam_ev)
5717 {
5718 	const void **tb;
5719 	const struct wmi_roam_event *ev;
5720 	int ret;
5721 
5722 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5723 	if (IS_ERR(tb)) {
5724 		ret = PTR_ERR(tb);
5725 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5726 		return ret;
5727 	}
5728 
5729 	ev = tb[WMI_TAG_ROAM_EVENT];
5730 	if (!ev) {
5731 		ath12k_warn(ab, "failed to fetch roam ev");
5732 		kfree(tb);
5733 		return -EPROTO;
5734 	}
5735 
5736 	roam_ev->vdev_id = ev->vdev_id;
5737 	roam_ev->reason = ev->reason;
5738 	roam_ev->rssi = ev->rssi;
5739 
5740 	kfree(tb);
5741 	return 0;
5742 }
5743 
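/* Map a center frequency to its flat channel index across all
 * registered bands, in band order; callers use this to index
 * per-channel data (e.g. survey results). If the frequency is not
 * found, the total channel count is returned.
 */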
5744 static int freq_to_idx(struct ath12k *ar, int freq)
5745 {
5746 	struct ieee80211_supported_band *sband;
5747 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5748 	int band, ch, idx = 0;
5749 
5750 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5751 		if (!ar->mac.sbands[band].channels)
5752 			continue;
5753 
5754 		sband = hw->wiphy->bands[band];
5755 		if (!sband)
5756 			continue;
5757 
5758 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5759 			if (sband->channels[ch].center_freq == freq)
5760 				goto exit;
5761 	}
5762 
5763 exit:
5764 	return idx;
5765 }
5766 
5767 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5768 				    struct wmi_chan_info_event *ch_info_ev)
5769 {
5770 	const void **tb;
5771 	const struct wmi_chan_info_event *ev;
5772 	int ret;
5773 
5774 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5775 	if (IS_ERR(tb)) {
5776 		ret = PTR_ERR(tb);
5777 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5778 		return ret;
5779 	}
5780 
5781 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5782 	if (!ev) {
5783 		ath12k_warn(ab, "failed to fetch chan info ev");
5784 		kfree(tb);
5785 		return -EPROTO;
5786 	}
5787 
5788 	ch_info_ev->err_code = ev->err_code;
5789 	ch_info_ev->freq = ev->freq;
5790 	ch_info_ev->cmd_flags = ev->cmd_flags;
5791 	ch_info_ev->noise_floor = ev->noise_floor;
5792 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5793 	ch_info_ev->cycle_count = ev->cycle_count;
5794 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5795 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5796 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5797 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5798 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5799 	ch_info_ev->vdev_id = ev->vdev_id;
5800 
5801 	kfree(tb);
5802 	return 0;
5803 }
5804 
5805 static int
5806 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5807 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5808 {
5809 	const void **tb;
5810 	const struct wmi_pdev_bss_chan_info_event *ev;
5811 	int ret;
5812 
5813 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5814 	if (IS_ERR(tb)) {
5815 		ret = PTR_ERR(tb);
5816 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5817 		return ret;
5818 	}
5819 
5820 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5821 	if (!ev) {
5822 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5823 		kfree(tb);
5824 		return -EPROTO;
5825 	}
5826 
5827 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5828 	bss_ch_info_ev->freq = ev->freq;
5829 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5830 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5831 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5832 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5833 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5834 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5835 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5836 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5837 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5838 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5839 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5840 
5841 	kfree(tb);
5842 	return 0;
5843 }
5844 
5845 static int
5846 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5847 				      struct wmi_vdev_install_key_complete_arg *arg)
5848 {
5849 	const void **tb;
5850 	const struct wmi_vdev_install_key_compl_event *ev;
5851 	int ret;
5852 
5853 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5854 	if (IS_ERR(tb)) {
5855 		ret = PTR_ERR(tb);
5856 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5857 		return ret;
5858 	}
5859 
5860 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5861 	if (!ev) {
5862 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5863 		kfree(tb);
5864 		return -EPROTO;
5865 	}
5866 
5867 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
5868 	arg->macaddr = ev->peer_macaddr.addr;
5869 	arg->key_idx = le32_to_cpu(ev->key_idx);
5870 	arg->key_flags = le32_to_cpu(ev->key_flags);
5871 	arg->status = le32_to_cpu(ev->status);
5872 
5873 	kfree(tb);
5874 	return 0;
5875 }
5876 
5877 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5878 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5879 {
5880 	const void **tb;
5881 	const struct wmi_peer_assoc_conf_event *ev;
5882 	int ret;
5883 
5884 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5885 	if (IS_ERR(tb)) {
5886 		ret = PTR_ERR(tb);
5887 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5888 		return ret;
5889 	}
5890 
5891 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5892 	if (!ev) {
5893 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5894 		kfree(tb);
5895 		return -EPROTO;
5896 	}
5897 
5898 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5899 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5900 
5901 	kfree(tb);
5902 	return 0;
5903 }
5904 
5905 static int
5906 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5907 			 const struct wmi_pdev_temperature_event *ev)
5908 {
5909 	const void **tb;
5910 	int ret;
5911 
5912 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5913 	if (IS_ERR(tb)) {
5914 		ret = PTR_ERR(tb);
5915 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5916 		return ret;
5917 	}
5918 
5919 	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5920 	if (!ev) {
5921 		ath12k_warn(ab, "failed to fetch pdev temp ev");
5922 		kfree(tb);
5923 		return -EPROTO;
5924 	}
5925 
5926 	kfree(tb);
5927 	return 0;
5928 }
5929 
5930 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5931 {
5932 	/* try to send pending beacons first. they take priority */
5933 	wake_up(&ab->wmi_ab.tx_credits_wq);
5934 }
5935 
5936 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
5937 				       struct sk_buff *skb)
5938 {
5939 	dev_kfree_skb(skb);
5940 }
5941 
5942 static bool ath12k_reg_is_world_alpha(char *alpha)
5943 {
5944 	if (alpha[0] == '0' && alpha[1] == '0')
5945 		return true;
5946 
5947 	if (alpha[0] == 'n' && alpha[1] == 'a')
5948 		return true;
5949 
5950 	return false;
5951 }
5952 
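/* Handle a regulatory channel list event: extract the regulatory info,
 * build an ieee80211_regdomain from it, then either hand it to an
 * already-registered radio via regd_update_work or stash it as the
 * default regd to be applied during mac registration.
 */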
5953 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
5954 {
5955 	struct ath12k_reg_info *reg_info = NULL;
5956 	struct ieee80211_regdomain *regd = NULL;
5957 	bool intersect = false;
5958 	int ret = 0, pdev_idx, i, j;
5959 	struct ath12k *ar;
5960 
5961 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
5962 	if (!reg_info) {
5963 		ret = -ENOMEM;
5964 		goto fallback;
5965 	}
5966 
5967 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
5968 
5969 	if (ret) {
5970 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
5971 		goto fallback;
5972 	}
5973 
5974 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
5975 		/* If setting the requested country fails, the firmware
5976 		 * retains the current regd. Log the failure and return
5977 		 * from here.
5978 		 */
5979 		ath12k_warn(ab, "failed to set the requested country regulatory setting\n");
5980 		goto mem_free;
5981 	}
5982 
5983 	pdev_idx = reg_info->phy_id;
5984 
5985 	if (pdev_idx >= ab->num_radios) {
5986 		/* Process the event for phy0 only if single_pdev_only
5987 		 * is true. If pdev_idx is valid but not 0, discard the
5988 		 * event. Otherwise, it goes to fallback.
5989 		 */
5990 		if (ab->hw_params->single_pdev_only &&
5991 		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
5992 			goto mem_free;
5993 		else
5994 			goto fallback;
5995 	}
5996 
5997 	/* Avoid multiple overwrites to default regd, during core
5998 	 * stop-start after mac registration.
5999 	 */
6000 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
6001 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
6002 		    reg_info->alpha2, 2))
6003 		goto mem_free;
6004 
6005 	/* Intersect new rules with default regd if a new country setting was
6006 	 * requested, i.e a default regd was already set during initialization
6007 	 * and the regd coming from this event has a valid country info.
6008 	 */
6009 	if (ab->default_regd[pdev_idx] &&
6010 	    !ath12k_reg_is_world_alpha((char *)
6011 		ab->default_regd[pdev_idx]->alpha2) &&
6012 	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
6013 		intersect = true;
6014 
6015 	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
6016 	if (!regd) {
6017 		ath12k_warn(ab, "failed to build regd from reg_info\n");
6018 		goto fallback;
6019 	}
6020 
6021 	spin_lock(&ab->base_lock);
6022 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
6023 		/* Once mac is registered, ar is valid and all CC events from
6024 		 * fw are currently considered to be received due to user
6025 		 * requests.
6026 		 * Free the previously built regd before assigning the newly
6027 		 * generated regd to ar. A NULL pointer is handled by
6028 		 * kfree() itself.
6029 		 */
6030 		ar = ab->pdevs[pdev_idx].ar;
6031 		kfree(ab->new_regd[pdev_idx]);
6032 		ab->new_regd[pdev_idx] = regd;
6033 		queue_work(ab->workqueue, &ar->regd_update_work);
6034 	} else {
6035 		/* Multiple events for the same *ar are not expected, but we
6036 		 * can still clear any previously stored default_regd if we
6037 		 * receive this event for the same radio by mistake.
6038 		 * A NULL pointer is handled by kfree() itself.
6039 		 */
6040 		kfree(ab->default_regd[pdev_idx]);
6041 		/* This regd would be applied during mac registration */
6042 		ab->default_regd[pdev_idx] = regd;
6043 	}
6044 	ab->dfs_region = reg_info->dfs_region;
6045 	spin_unlock(&ab->base_lock);
6046 
6047 	goto mem_free;
6048 
6049 fallback:
6050 	/* Fall back to the older regd (by sending the previous country
6051 	 * setting again) if fw has succeeded and we failed to process it
6052 	 * here. The regdomain should be uniform across driver and fw. Since
6053 	 * the FW has processed the command and sent a success status, we
6054 	 * expect this function to succeed as well. If it doesn't, CTRY needs
6055 	 * to be reverted at the fw and the old SCAN_CHAN_LIST cmd sent.
6056 	 */
6057 	/* TODO: This is rare, but still should also be handled */
6058 	WARN_ON(1);
6059 mem_free:
6060 	if (reg_info) {
6061 		kfree(reg_info->reg_rules_2g_ptr);
6062 		kfree(reg_info->reg_rules_5g_ptr);
6063 		if (reg_info->is_ext_reg_event) {
6064 			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
6065 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
6066 
6067 			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
6068 				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
6069 					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
6070 		}
6071 		kfree(reg_info);
6072 	}
6073 	return ret;
6074 }
6075 
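/* The READY event consists of a fixed struct optionally followed by a
 * WMI_TAG_ARRAY_FIXED_STRUCT of extra MAC addresses; when firmware
 * supplies at least one address per radio, they are copied to the
 * pdevs and pdevs_macaddr_valid is set.
 */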
6076 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6077 				const void *ptr, void *data)
6078 {
6079 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
6080 	struct wmi_ready_event fixed_param;
6081 	struct ath12k_wmi_mac_addr_params *addr_list;
6082 	struct ath12k_pdev *pdev;
6083 	u32 num_mac_addr;
6084 	int i;
6085 
6086 	switch (tag) {
6087 	case WMI_TAG_READY_EVENT:
6088 		memset(&fixed_param, 0, sizeof(fixed_param));
6089 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6090 		       min_t(u16, sizeof(fixed_param), len));
6091 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6092 		rdy_parse->num_extra_mac_addr =
6093 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6094 
6095 		ether_addr_copy(ab->mac_addr,
6096 				fixed_param.ready_event_min.mac_addr.addr);
6097 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6098 		ab->wmi_ready = true;
6099 		break;
6100 	case WMI_TAG_ARRAY_FIXED_STRUCT:
6101 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6102 		num_mac_addr = rdy_parse->num_extra_mac_addr;
6103 
6104 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6105 			break;
6106 
6107 		for (i = 0; i < ab->num_radios; i++) {
6108 			pdev = &ab->pdevs[i];
6109 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6110 		}
6111 		ab->pdevs_macaddr_valid = true;
6112 		break;
6113 	default:
6114 		break;
6115 	}
6116 
6117 	return 0;
6118 }
6119 
6120 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6121 {
6122 	struct ath12k_wmi_rdy_parse rdy_parse = { };
6123 	int ret;
6124 
6125 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6126 				  ath12k_wmi_rdy_parse, &rdy_parse);
6127 	if (ret) {
6128 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
6129 		return ret;
6130 	}
6131 
6132 	complete(&ab->wmi_ab.unified_ready);
6133 	return 0;
6134 }
6135 
6136 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6137 {
6138 	struct wmi_peer_delete_resp_event peer_del_resp;
6139 	struct ath12k *ar;
6140 
6141 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6142 		ath12k_warn(ab, "failed to extract peer delete resp");
6143 		return;
6144 	}
6145 
6146 	rcu_read_lock();
6147 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
6148 	if (!ar) {
6149 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6150 			    peer_del_resp.vdev_id);
6151 		rcu_read_unlock();
6152 		return;
6153 	}
6154 
6155 	complete(&ar->peer_delete_done);
6156 	rcu_read_unlock();
6157 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6158 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6159 }
6160 
6161 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
6162 					  struct sk_buff *skb)
6163 {
6164 	struct ath12k *ar;
6165 	u32 vdev_id = 0;
6166 
6167 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6168 		ath12k_warn(ab, "failed to extract vdev delete resp");
6169 		return;
6170 	}
6171 
6172 	rcu_read_lock();
6173 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6174 	if (!ar) {
6175 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6176 			    vdev_id);
6177 		rcu_read_unlock();
6178 		return;
6179 	}
6180 
6181 	complete(&ar->vdev_delete_done);
6182 
6183 	rcu_read_unlock();
6184 
6185 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6186 		   vdev_id);
6187 }
6188 
6189 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
6190 {
6191 	switch (vdev_resp_status) {
6192 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6193 		return "invalid vdev id";
6194 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6195 		return "not supported";
6196 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6197 		return "dfs violation";
6198 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6199 		return "invalid regdomain";
6200 	default:
6201 		return "unknown";
6202 	}
6203 }
6204 
6205 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
6206 {
6207 	struct wmi_vdev_start_resp_event vdev_start_resp;
6208 	struct ath12k *ar;
6209 	u32 status;
6210 
6211 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6212 		ath12k_warn(ab, "failed to extract vdev start resp");
6213 		return;
6214 	}
6215 
6216 	rcu_read_lock();
6217 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
6218 	if (!ar) {
6219 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6220 			    vdev_start_resp.vdev_id);
6221 		rcu_read_unlock();
6222 		return;
6223 	}
6224 
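	/* Clear the cached status first; a waiter on vdev_setup_done
	 * inspects last_wmi_vdev_start_status after completion.
	 */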
6225 	ar->last_wmi_vdev_start_status = 0;
6226 
6227 	status = le32_to_cpu(vdev_start_resp.status);
6228 
6229 	if (WARN_ON_ONCE(status)) {
6230 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6231 			    status, ath12k_wmi_vdev_resp_print(status));
6232 		ar->last_wmi_vdev_start_status = status;
6233 	}
6234 
6235 	complete(&ar->vdev_setup_done);
6236 
6237 	rcu_read_unlock();
6238 
6239 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6240 		   vdev_start_resp.vdev_id);
6241 }
6242 
6243 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6244 {
6245 	u32 vdev_id, tx_status;
6246 
6247 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6248 		ath12k_warn(ab, "failed to extract bcn tx status");
6249 		return;
6250 	}
6251 }
6252 
6253 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6254 {
6255 	struct ath12k *ar;
6256 	u32 vdev_id = 0;
6257 
6258 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6259 		ath12k_warn(ab, "failed to extract vdev stopped event");
6260 		return;
6261 	}
6262 
6263 	rcu_read_lock();
6264 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6265 	if (!ar) {
6266 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6267 			    vdev_id);
6268 		rcu_read_unlock();
6269 		return;
6270 	}
6271 
6272 	complete(&ar->vdev_setup_done);
6273 
6274 	rcu_read_unlock();
6275 
6276 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6277 }
6278 
6279 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6280 {
6281 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6282 	struct ath12k *ar;
6283 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6284 	struct ieee80211_hdr *hdr;
6285 	u16 fc;
6286 	struct ieee80211_supported_band *sband;
6287 
6288 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6289 		ath12k_warn(ab, "failed to extract mgmt rx event");
6290 		dev_kfree_skb(skb);
6291 		return;
6292 	}
6293 
6294 	memset(status, 0, sizeof(*status));
6295 
6296 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
6297 		   rx_ev.status);
6298 
6299 	rcu_read_lock();
6300 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6301 
6302 	if (!ar) {
6303 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6304 			    rx_ev.pdev_id);
6305 		dev_kfree_skb(skb);
6306 		goto exit;
6307 	}
6308 
6309 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
6310 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6311 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
6312 			     WMI_RX_STATUS_ERR_CRC))) {
6313 		dev_kfree_skb(skb);
6314 		goto exit;
6315 	}
6316 
6317 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6318 		status->flag |= RX_FLAG_MMIC_ERROR;
6319 
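	/* Derive the band from the reported frequency for 6 GHz frames and
	 * from the channel number for 2.4/5 GHz frames.
	 */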
6320 	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
6321 	    rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
6322 		status->band = NL80211_BAND_6GHZ;
6323 		status->freq = rx_ev.chan_freq;
6324 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6325 		status->band = NL80211_BAND_2GHZ;
6326 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
6327 		status->band = NL80211_BAND_5GHZ;
6328 	} else {
6329 		/* Shouldn't happen unless the list of channels advertised to
6330 		 * mac80211 has been changed.
6331 		 */
6332 		WARN_ON_ONCE(1);
6333 		dev_kfree_skb(skb);
6334 		goto exit;
6335 	}
6336 
6337 	if (rx_ev.phy_mode == MODE_11B &&
6338 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6339 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6340 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6341 
6342 	sband = &ar->mac.sbands[status->band];
6343 
6344 	if (status->band != NL80211_BAND_6GHZ)
6345 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6346 							      status->band);
6347 
6348 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
6349 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6350 
6351 	hdr = (struct ieee80211_hdr *)skb->data;
6352 	fc = le16_to_cpu(hdr->frame_control);
6353 
6354 	/* Firmware is guaranteed to report all essential management frames via
6355 	 * WMI while it can deliver some extra via HTT. Since there can be
6356 	 * duplicates, split the reporting wrt monitor/sniffing.
6357 	 */
6358 	status->flag |= RX_FLAG_SKIP_MONITOR;
6359 
6360 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
6361 	 * including group privacy action frames.
6362 	 */
6363 	if (ieee80211_has_protected(hdr->frame_control)) {
6364 		status->flag |= RX_FLAG_DECRYPTED;
6365 
6366 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
6367 			status->flag |= RX_FLAG_IV_STRIPPED |
6368 					RX_FLAG_MMIC_STRIPPED;
6369 			hdr->frame_control = __cpu_to_le16(fc &
6370 					     ~IEEE80211_FCTL_PROTECTED);
6371 		}
6372 	}
6373 
6374 	if (ieee80211_is_beacon(hdr->frame_control))
6375 		ath12k_mac_handle_beacon(ar, skb);
6376 
6377 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6378 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
6379 		   skb, skb->len,
6380 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6381 
6382 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6383 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6384 		   status->freq, status->band, status->signal,
6385 		   status->rate_idx);
6386 
6387 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
6388 
6389 exit:
6390 	rcu_read_unlock();
6391 }
6392 
6393 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6394 {
6395 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6396 	struct ath12k *ar;
6397 
6398 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6399 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
6400 		return;
6401 	}
6402 
6403 	rcu_read_lock();
6404 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6405 	if (!ar) {
6406 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6407 			    tx_compl_param.pdev_id);
6408 		goto exit;
6409 	}
6410 
6411 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6412 				 le32_to_cpu(tx_compl_param.status));
6413 
6414 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6415 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6416 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
6417 		   tx_compl_param.status);
6418 
6419 exit:
6420 	rcu_read_unlock();
6421 }
6422 
6423 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
6424 						  u32 vdev_id,
6425 						  enum ath12k_scan_state state)
6426 {
6427 	int i;
6428 	struct ath12k_pdev *pdev;
6429 	struct ath12k *ar;
6430 
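	/* Return the first active pdev whose in-flight scan matches both
	 * the requested scan state and the given vdev id.
	 */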
6431 	for (i = 0; i < ab->num_radios; i++) {
6432 		pdev = rcu_dereference(ab->pdevs_active[i]);
6433 		if (pdev && pdev->ar) {
6434 			ar = pdev->ar;
6435 
6436 			spin_lock_bh(&ar->data_lock);
6437 			if (ar->scan.state == state &&
6438 			    ar->scan.arvif &&
6439 			    ar->scan.arvif->vdev_id == vdev_id) {
6440 				spin_unlock_bh(&ar->data_lock);
6441 				return ar;
6442 			}
6443 			spin_unlock_bh(&ar->data_lock);
6444 		}
6445 	}
6446 	return NULL;
6447 }
6448 
6449 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
6450 {
6451 	struct ath12k *ar;
6452 	struct wmi_scan_event scan_ev = {0};
6453 
6454 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6455 		ath12k_warn(ab, "failed to extract scan event");
6456 		return;
6457 	}
6458 
6459 	rcu_read_lock();
6460 
6461 	/* In case the scan was cancelled, e.g. during interface teardown,
6462 	 * the interface will not be found among the active interfaces.
6463 	 * Rather, in such scenarios, iterate over the active pdevs to
6464 	 * find the 'ar' whose scan is ABORTING and whose aborting scan's
6465 	 * vdev id matches this event's info.
6466 	 */
6467 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
6468 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
6469 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6470 						 ATH12K_SCAN_ABORTING);
6471 		if (!ar)
6472 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6473 							 ATH12K_SCAN_RUNNING);
6474 	} else {
6475 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
6476 	}
6477 
6478 	if (!ar) {
6479 		ath12k_warn(ab, "Received scan event for unknown vdev");
6480 		rcu_read_unlock();
6481 		return;
6482 	}
6483 
6484 	spin_lock_bh(&ar->data_lock);
6485 
6486 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6487 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
6488 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
6489 						  le32_to_cpu(scan_ev.reason)),
6490 		   le32_to_cpu(scan_ev.event_type),
6491 		   le32_to_cpu(scan_ev.reason),
6492 		   le32_to_cpu(scan_ev.channel_freq),
6493 		   le32_to_cpu(scan_ev.scan_req_id),
6494 		   le32_to_cpu(scan_ev.scan_id),
6495 		   le32_to_cpu(scan_ev.vdev_id),
6496 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
6497 
6498 	switch (le32_to_cpu(scan_ev.event_type)) {
6499 	case WMI_SCAN_EVENT_STARTED:
6500 		ath12k_wmi_event_scan_started(ar);
6501 		break;
6502 	case WMI_SCAN_EVENT_COMPLETED:
6503 		ath12k_wmi_event_scan_completed(ar);
6504 		break;
6505 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6506 		ath12k_wmi_event_scan_bss_chan(ar);
6507 		break;
6508 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6509 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
6510 		break;
6511 	case WMI_SCAN_EVENT_START_FAILED:
6512 		ath12k_warn(ab, "received scan start failure event\n");
6513 		ath12k_wmi_event_scan_start_failed(ar);
6514 		break;
6515 	case WMI_SCAN_EVENT_DEQUEUED:
6516 		__ath12k_mac_scan_finish(ar);
6517 		break;
6518 	case WMI_SCAN_EVENT_PREEMPTED:
6519 	case WMI_SCAN_EVENT_RESTARTED:
6520 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6521 	default:
6522 		break;
6523 	}
6524 
6525 	spin_unlock_bh(&ar->data_lock);
6526 
6527 	rcu_read_unlock();
6528 }
6529 
6530 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6531 {
6532 	struct wmi_peer_sta_kickout_arg arg = {};
6533 	struct ieee80211_sta *sta;
6534 	struct ath12k_peer *peer;
6535 	struct ath12k *ar;
6536 
6537 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6538 		ath12k_warn(ab, "failed to extract peer sta kickout event");
6539 		return;
6540 	}
6541 
6542 	rcu_read_lock();
6543 
6544 	spin_lock_bh(&ab->base_lock);
6545 
6546 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6547 
6548 	if (!peer) {
6549 		ath12k_warn(ab, "peer not found %pM\n",
6550 			    arg.mac_addr);
6551 		goto exit;
6552 	}
6553 
6554 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6555 	if (!ar) {
6556 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6557 			    peer->vdev_id);
6558 		goto exit;
6559 	}
6560 
6561 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6562 					   arg.mac_addr, NULL);
6563 	if (!sta) {
6564 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6565 			    arg.mac_addr);
6566 		goto exit;
6567 	}
6568 
6569 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6570 		   arg.mac_addr);
6571 
6572 	ieee80211_report_low_ack(sta, 10);
6573 
6574 exit:
6575 	spin_unlock_bh(&ab->base_lock);
6576 	rcu_read_unlock();
6577 }
6578 
6579 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6580 {
6581 	struct wmi_roam_event roam_ev = {};
6582 	struct ath12k *ar;
6583 	u32 vdev_id;
6584 	u8 roam_reason;
6585 
6586 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6587 		ath12k_warn(ab, "failed to extract roam event");
6588 		return;
6589 	}
6590 
6591 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
6592 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6593 				   WMI_ROAM_REASON_MASK);
6594 
6595 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6596 		   "wmi roam event vdev %u reason %d rssi %d\n",
6597 		   vdev_id, roam_reason, roam_ev.rssi);
6598 
6599 	rcu_read_lock();
6600 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6601 	if (!ar) {
6602 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6603 		rcu_read_unlock();
6604 		return;
6605 	}
6606 
6607 	if (roam_reason >= WMI_ROAM_REASON_MAX)
6608 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6609 			    roam_reason, vdev_id);
6610 
6611 	switch (roam_reason) {
6612 	case WMI_ROAM_REASON_BEACON_MISS:
6613 		ath12k_mac_handle_beacon_miss(ar, vdev_id);
6614 		break;
6615 	case WMI_ROAM_REASON_BETTER_AP:
6616 	case WMI_ROAM_REASON_LOW_RSSI:
6617 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6618 	case WMI_ROAM_REASON_HO_FAILED:
6619 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6620 			    roam_reason, vdev_id);
6621 		break;
6622 	}
6623 
6624 	rcu_read_unlock();
6625 }
6626 
6627 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6628 {
6629 	struct wmi_chan_info_event ch_info_ev = {0};
6630 	struct ath12k *ar;
6631 	struct survey_info *survey;
6632 	int idx;
6633 	/* HW channel counters frequency value in hertz */
6634 	u32 cc_freq_hz = ab->cc_freq_hz;
6635 
6636 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6637 		ath12k_warn(ab, "failed to extract chan info event");
6638 		return;
6639 	}
6640 
6641 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6642 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6643 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6644 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6645 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6646 		   ch_info_ev.mac_clk_mhz);
6647 
6648 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6649 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6650 		return;
6651 	}
6652 
6653 	rcu_read_lock();
6654 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6655 	if (!ar) {
6656 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6657 			    ch_info_ev.vdev_id);
6658 		rcu_read_unlock();
6659 		return;
6660 	}
6661 	spin_lock_bh(&ar->data_lock);
6662 
6663 	switch (ar->scan.state) {
6664 	case ATH12K_SCAN_IDLE:
6665 	case ATH12K_SCAN_STARTING:
6666 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6667 		goto exit;
6668 	case ATH12K_SCAN_RUNNING:
6669 	case ATH12K_SCAN_ABORTING:
6670 		break;
6671 	}
6672 
6673 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6674 	if (idx >= ARRAY_SIZE(ar->survey)) {
6675 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6676 			    ch_info_ev.freq, idx);
6677 		goto exit;
6678 	}
6679 
6680 	/* If FW provides the MAC clock frequency in MHz, override the
6681 	 * initialized HW channel counters frequency value.
6682 	 */
6683 	if (ch_info_ev.mac_clk_mhz)
6684 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6685 
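	/* A start response opens a fresh survey record for this channel;
	 * cycle counters are converted to time via the counter frequency.
	 */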
6686 	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
6687 		survey = &ar->survey[idx];
6688 		memset(survey, 0, sizeof(*survey));
6689 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6690 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6691 				 SURVEY_INFO_TIME_BUSY;
6692 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6693 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6694 					    cc_freq_hz);
6695 	}
6696 exit:
6697 	spin_unlock_bh(&ar->data_lock);
6698 	rcu_read_unlock();
6699 }
6700 
6701 static void
6702 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6703 {
6704 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6705 	struct survey_info *survey;
6706 	struct ath12k *ar;
6707 	u32 cc_freq_hz = ab->cc_freq_hz;
6708 	u64 busy, total, tx, rx, rx_bss;
6709 	int idx;
6710 
6711 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6712 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6713 		return;
6714 	}
6715 
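	/* Firmware reports each cycle counter as two 32-bit halves;
	 * reassemble them into 64-bit values before conversion.
	 */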
6716 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6717 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6718 
6719 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6720 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6721 
6722 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6723 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6724 
6725 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6726 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6727 
6728 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6729 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6730 
6731 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6732 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6733 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6734 		   bss_ch_info_ev.noise_floor, busy, total,
6735 		   tx, rx, rx_bss);
6736 
6737 	rcu_read_lock();
6738 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6739 
6740 	if (!ar) {
6741 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6742 			    bss_ch_info_ev.pdev_id);
6743 		rcu_read_unlock();
6744 		return;
6745 	}
6746 
6747 	spin_lock_bh(&ar->data_lock);
6748 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6749 	if (idx >= ARRAY_SIZE(ar->survey)) {
6750 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6751 			    bss_ch_info_ev.freq, idx);
6752 		goto exit;
6753 	}
6754 
6755 	survey = &ar->survey[idx];
6756 
6757 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6758 	survey->time      = div_u64(total, cc_freq_hz);
6759 	survey->time_busy = div_u64(busy, cc_freq_hz);
6760 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6761 	survey->time_tx   = div_u64(tx, cc_freq_hz);
6762 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6763 			     SURVEY_INFO_TIME |
6764 			     SURVEY_INFO_TIME_BUSY |
6765 			     SURVEY_INFO_TIME_RX |
6766 			     SURVEY_INFO_TIME_TX);
6767 exit:
6768 	spin_unlock_bh(&ar->data_lock);
6769 	complete(&ar->bss_survey_done);
6770 
6771 	rcu_read_unlock();
6772 }
6773 
6774 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6775 						struct sk_buff *skb)
6776 {
6777 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6778 	struct ath12k *ar;
6779 
6780 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6781 		ath12k_warn(ab, "failed to extract install key compl event");
6782 		return;
6783 	}
6784 
6785 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6786 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6787 		   install_key_compl.key_idx, install_key_compl.key_flags,
6788 		   install_key_compl.macaddr, install_key_compl.status);
6789 
6790 	rcu_read_lock();
6791 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6792 	if (!ar) {
6793 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6794 			    install_key_compl.vdev_id);
6795 		rcu_read_unlock();
6796 		return;
6797 	}
6798 
6799 	ar->install_key_status = 0;
6800 
6801 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6802 		ath12k_warn(ab, "install key failed for %pM status %d\n",
6803 			    install_key_compl.macaddr, install_key_compl.status);
6804 		ar->install_key_status = install_key_compl.status;
6805 	}
6806 
6807 	complete(&ar->install_key_done);
6808 	rcu_read_unlock();
6809 }
6810 
6811 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6812 					  u16 tag, u16 len,
6813 					  const void *ptr,
6814 					  void *data)
6815 {
6816 	const struct wmi_service_available_event *ev;
6817 	u32 *wmi_ext2_service_bitmap;
6818 	int i, j;
6819 	u16 expected_len;
6820 
6821 	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6822 	if (len < expected_len) {
6823 		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
6824 			    len, tag);
6825 		return -EINVAL;
6826 	}
6827 
6828 	switch (tag) {
6829 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
6830 		ev = (struct wmi_service_available_event *)ptr;
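		/* Each 32-bit segment word carries
		 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service flags; j tracks
		 * the absolute service id, which for this tag starts at
		 * WMI_MAX_SERVICE.
		 */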
6831 		for (i = 0, j = WMI_MAX_SERVICE;
6832 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6833 		     i++) {
6834 			do {
6835 				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6836 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6837 					set_bit(j, ab->wmi_ab.svc_map);
6838 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6839 		}
6840 
6841 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6842 			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
6843 			   ev->wmi_service_segment_bitmap[0],
6844 			   ev->wmi_service_segment_bitmap[1],
6845 			   ev->wmi_service_segment_bitmap[2],
6846 			   ev->wmi_service_segment_bitmap[3]);
6847 		break;
6848 	case WMI_TAG_ARRAY_UINT32:
6849 		wmi_ext2_service_bitmap = (u32 *)ptr;
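		/* Same walk as above, covering the ext2 service range
		 * [WMI_MAX_EXT_SERVICE, WMI_MAX_EXT2_SERVICE).
		 */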
6850 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
6851 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
6852 		     i++) {
6853 			do {
6854 				if (wmi_ext2_service_bitmap[i] &
6855 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6856 					set_bit(j, ab->wmi_ab.svc_map);
6857 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6858 		}
6859 
6860 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6861 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
6862 			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
6863 			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
6864 		break;
6865 	}
6866 	return 0;
6867 }
6868 
6869 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
6870 {
6871 	int ret;
6872 
6873 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6874 				  ath12k_wmi_tlv_services_parser,
6875 				  NULL);
6876 	return ret;
6877 }
6878 
6879 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6880 {
6881 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6882 	struct ath12k *ar;
6883 
6884 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6885 		ath12k_warn(ab, "failed to extract peer assoc conf event");
6886 		return;
6887 	}
6888 
6889 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6890 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
6891 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6892 
6893 	rcu_read_lock();
6894 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6895 
6896 	if (!ar) {
6897 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6898 			    peer_assoc_conf.vdev_id);
6899 		rcu_read_unlock();
6900 		return;
6901 	}
6902 
6903 	complete(&ar->peer_assoc_done);
6904 	rcu_read_unlock();
6905 }
6906 
6907 static void
6908 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
6909 			      struct ath12k_fw_stats *fw_stats,
6910 			      char *buf, u32 *length)
6911 {
6912 	const struct ath12k_fw_stats_vdev *vdev;
6913 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
6914 	struct ath12k_link_vif *arvif;
6915 	u32 len = *length;
6916 	u8 *vif_macaddr;
6917 	int i;
6918 
6919 	len += scnprintf(buf + len, buf_len - len, "\n");
6920 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
6921 			 "ath12k VDEV stats");
6922 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6923 			 "=================");
6924 
6925 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
6926 		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
6927 		if (!arvif)
6928 			continue;
6929 		vif_macaddr = arvif->ahvif->vif->addr;
6930 
6931 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6932 				 "VDEV ID", vdev->vdev_id);
6933 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
6934 				 "VDEV MAC address", vif_macaddr);
6935 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6936 				 "beacon snr", vdev->beacon_snr);
6937 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6938 				 "data snr", vdev->data_snr);
6939 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6940 				 "num rx frames", vdev->num_rx_frames);
6941 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6942 				 "num rts fail", vdev->num_rts_fail);
6943 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6944 				 "num rts success", vdev->num_rts_success);
6945 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6946 				 "num rx err", vdev->num_rx_err);
6947 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6948 				 "num rx discard", vdev->num_rx_discard);
6949 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6950 				 "num tx not acked", vdev->num_tx_not_acked);
6951 
6952 		for (i = 0 ; i < WLAN_MAX_AC; i++)
6953 			len += scnprintf(buf + len, buf_len - len,
6954 					"%25s [%02d] %u\n",
6955 					"num tx frames", i,
6956 					vdev->num_tx_frames[i]);
6957 
6958 		for (i = 0 ; i < WLAN_MAX_AC; i++)
6959 			len += scnprintf(buf + len, buf_len - len,
6960 					"%25s [%02d] %u\n",
6961 					"num tx frames retries", i,
6962 					vdev->num_tx_frames_retries[i]);
6963 
6964 		for (i = 0 ; i < WLAN_MAX_AC; i++)
6965 			len += scnprintf(buf + len, buf_len - len,
6966 					"%25s [%02d] %u\n",
6967 					"num tx frames failures", i,
6968 					vdev->num_tx_frames_failures[i]);
6969 
6970 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
6971 			len += scnprintf(buf + len, buf_len - len,
6972 					"%25s [%02d] 0x%08x\n",
6973 					"tx rate history", i,
6974 					vdev->tx_rate_history[i]);
6975 		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
6976 			len += scnprintf(buf + len, buf_len - len,
6977 					"%25s [%02d] %u\n",
6978 					"beacon rssi history", i,
6979 					vdev->beacon_rssi_history[i]);
6980 
6981 		len += scnprintf(buf + len, buf_len - len, "\n");
6982 		*length = len;
6983 	}
6984 }
6985 
6986 static void
6987 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
6988 			     struct ath12k_fw_stats *fw_stats,
6989 			     char *buf, u32 *length)
6990 {
6991 	const struct ath12k_fw_stats_bcn *bcn;
6992 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
6993 	struct ath12k_link_vif *arvif;
6994 	u32 len = *length;
6995 	size_t num_bcn;
6996 
6997 	num_bcn = list_count_nodes(&fw_stats->bcn);
6998 
6999 	len += scnprintf(buf + len, buf_len - len, "\n");
7000 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7001 			 "ath12k Beacon stats", num_bcn);
7002 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7003 			 "===================");
7004 
7005 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
7006 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7007 		if (!arvif)
7008 			continue;
7009 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7010 				 "VDEV ID", bcn->vdev_id);
7011 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7012 				 "VDEV MAC address", arvif->ahvif->vif->addr);
7013 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7014 				 "================");
7015 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7016 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7017 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7018 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7019 
7020 		len += scnprintf(buf + len, buf_len - len, "\n");
7021 		*length = len;
7022 	}
7023 }
7024 
7025 static void
7026 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7027 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
7028 {
7029 	u32 len = *length;
7030 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7031 
7032 	len += scnprintf(buf + len, buf_len - len, "\n");
7033 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
7034 			"ath12k PDEV stats");
7035 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7036 			"=================");
7037 
7038 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7039 			"Channel noise floor", pdev->ch_noise_floor);
7040 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7041 			"Channel TX power", pdev->chan_tx_power);
7042 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7043 			"TX frame count", pdev->tx_frame_count);
7044 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7045 			"RX frame count", pdev->rx_frame_count);
7046 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7047 			"RX clear count", pdev->rx_clear_count);
7048 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7049 			"Cycle count", pdev->cycle_count);
7050 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7051 			"PHY error count", pdev->phy_err_count);
7052 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7053 			"soc drop count", fw_soc_drop_cnt);
7054 
7055 	*length = len;
7056 }
7057 
7058 static void
7059 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7060 				 char *buf, u32 *length)
7061 {
7062 	u32 len = *length;
7063 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7064 
7065 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7066 			 "ath12k PDEV TX stats");
7067 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7068 			 "====================");
7069 
7070 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7071 			 "HTT cookies queued", pdev->comp_queued);
7072 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7073 			 "HTT cookies disp.", pdev->comp_delivered);
7074 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7075 			 "MSDU queued", pdev->msdu_enqued);
7076 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7077 			 "MPDU queued", pdev->mpdu_enqued);
7078 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7079 			 "MSDUs dropped", pdev->wmm_drop);
7080 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7081 			 "Local enqued", pdev->local_enqued);
7082 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7083 			 "Local freed", pdev->local_freed);
7084 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7085 			 "HW queued", pdev->hw_queued);
7086 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7087 			 "PPDUs reaped", pdev->hw_reaped);
7088 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7089 			 "Num underruns", pdev->underrun);
7090 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7091 			 "PPDUs cleaned", pdev->tx_abort);
7092 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7093 			 "MPDUs requeued", pdev->mpdus_requed);
7094 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7095 			 "Excessive retries", pdev->tx_ko);
7096 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7097 			 "HW rate", pdev->data_rc);
7098 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7099 			 "Sched self triggers", pdev->self_triggers);
7100 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7101 			 "Dropped due to SW retries",
7102 			 pdev->sw_retry_failure);
7103 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7104 			 "Illegal rate phy errors",
7105 			 pdev->illgl_rate_phy_err);
7106 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7107 			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
7108 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7109 			 "TX timeout", pdev->pdev_tx_timeout);
7110 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7111 			 "PDEV resets", pdev->pdev_resets);
7112 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7113 			 "Stateless TIDs alloc failures",
7114 			 pdev->stateless_tid_alloc_failure);
7115 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7116 			 "PHY underrun", pdev->phy_underrun);
7117 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7118 			 "MPDU is more than txop limit", pdev->txop_ovf);
7119 	*length = len;
7120 }
7121 
7122 static void
7123 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7124 				 char *buf, u32 *length)
7125 {
7126 	u32 len = *length;
7127 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7128 
7129 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7130 			 "ath12k PDEV RX stats");
7131 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7132 			 "====================");
7133 
7134 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7135 			 "Mid PPDU route change",
7136 			 pdev->mid_ppdu_route_change);
7137 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7138 			 "Tot. number of statuses", pdev->status_rcvd);
7139 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7140 			 "Extra frags on rings 0", pdev->r0_frags);
7141 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7142 			 "Extra frags on rings 1", pdev->r1_frags);
7143 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7144 			 "Extra frags on rings 2", pdev->r2_frags);
7145 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7146 			 "Extra frags on rings 3", pdev->r3_frags);
7147 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7148 			 "MSDUs delivered to HTT", pdev->htt_msdus);
7149 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7150 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
7151 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7152 			 "MSDUs delivered to stack", pdev->loc_msdus);
7153 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7154 			 "MPDUs delivered to stack", pdev->loc_mpdus);
7155 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7156 			 "Oversized AMSDUs", pdev->oversize_amsdu);
7157 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7158 			 "PHY errors", pdev->phy_errs);
7159 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7160 			 "PHY errors drops", pdev->phy_err_drop);
7161 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7162 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
7163 	*length = len;
7164 }
7165 
7166 static void
7167 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
7168 			      struct ath12k_fw_stats *fw_stats,
7169 			      char *buf, u32 *length)
7170 {
7171 	const struct ath12k_fw_stats_pdev *pdev;
7172 	u32 len = *length;
7173 
7174 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
7175 					struct ath12k_fw_stats_pdev, list);
7176 	if (!pdev) {
7177 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
7178 		return;
7179 	}
7180 
7181 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
7182 					   ar->ab->fw_soc_drop_count);
7183 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
7184 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
7185 
7186 	*length = len;
7187 }
7188 
7189 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
7190 			      struct ath12k_fw_stats *fw_stats,
7191 			      u32 stats_id, char *buf)
7192 {
7193 	u32 len = 0;
7194 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7195 
7196 	spin_lock_bh(&ar->data_lock);
7197 
7198 	switch (stats_id) {
7199 	case WMI_REQUEST_VDEV_STAT:
7200 		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
7201 		break;
7202 	case WMI_REQUEST_BCN_STAT:
7203 		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
7204 		break;
7205 	case WMI_REQUEST_PDEV_STAT:
7206 		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
7207 		break;
7208 	default:
7209 		break;
7210 	}
7211 
7212 	spin_unlock_bh(&ar->data_lock);
7213 
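	/* Always NUL-terminate the stats buffer, clamping to the last byte
	 * when the dump filled it completely.
	 */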
7214 	if (len >= buf_len)
7215 		buf[len - 1] = 0;
7216 	else
7217 		buf[len] = 0;
7218 
7219 	ath12k_debugfs_fw_stats_reset(ar);
7220 }
7221 
7222 static void
7223 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
7224 			   struct ath12k_fw_stats_vdev *dst)
7225 {
7226 	int i;
7227 
7228 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7229 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
7230 	dst->data_snr = le32_to_cpu(src->data_snr);
7231 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
7232 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
7233 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
7234 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
7235 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
7236 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
7237 
7238 	for (i = 0; i < WLAN_MAX_AC; i++)
7239 		dst->num_tx_frames[i] =
7240 			le32_to_cpu(src->num_tx_frames[i]);
7241 
7242 	for (i = 0; i < WLAN_MAX_AC; i++)
7243 		dst->num_tx_frames_retries[i] =
7244 			le32_to_cpu(src->num_tx_frames_retries[i]);
7245 
7246 	for (i = 0; i < WLAN_MAX_AC; i++)
7247 		dst->num_tx_frames_failures[i] =
7248 			le32_to_cpu(src->num_tx_frames_failures[i]);
7249 
7250 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7251 		dst->tx_rate_history[i] =
7252 			le32_to_cpu(src->tx_rate_history[i]);
7253 
7254 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
7255 		dst->beacon_rssi_history[i] =
7256 			le32_to_cpu(src->beacon_rssi_history[i]);
7257 }
7258 
7259 static void
7260 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
7261 			  struct ath12k_fw_stats_bcn *dst)
7262 {
7263 	dst->vdev_id = le32_to_cpu(src->vdev_id);
7264 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
7265 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
7266 }
7267 
7268 static void
7269 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
7270 				struct ath12k_fw_stats_pdev *dst)
7271 {
7272 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
7273 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
7274 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
7275 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
7276 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
7277 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
7278 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
7279 }
7280 
7281 static void
7282 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
7283 			      struct ath12k_fw_stats_pdev *dst)
7284 {
7285 	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
7286 	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
7287 	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
7288 	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
7289 	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
7290 	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
7291 	dst->local_freed = a_sle32_to_cpu(src->local_freed);
7292 	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
7293 	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
7294 	dst->underrun = a_sle32_to_cpu(src->underrun);
7295 	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
7296 	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
7297 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
7298 	dst->data_rc = __le32_to_cpu(src->data_rc);
7299 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
7300 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
7301 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
7302 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
7303 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
7304 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
7305 	dst->stateless_tid_alloc_failure =
7306 		__le32_to_cpu(src->stateless_tid_alloc_failure);
7307 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
7308 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
7309 }
7310 
7311 static void
7312 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
7313 			      struct ath12k_fw_stats_pdev *dst)
7314 {
7315 	dst->mid_ppdu_route_change =
7316 		a_sle32_to_cpu(src->mid_ppdu_route_change);
7317 	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
7318 	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
7319 	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
7320 	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
7321 	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
7322 	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
7323 	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
7324 	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
7325 	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
7326 	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
7327 	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
7328 	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
7329 	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
7330 }
7331 
7332 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
7333 					      struct wmi_tlv_fw_stats_parse *parse,
7334 					      const void *ptr,
7335 					      u16 len)
7336 {
7337 	const struct wmi_stats_event *ev = parse->ev;
7338 	struct ath12k_fw_stats stats = {0};
7339 	struct ath12k *ar;
7340 	struct ath12k_link_vif *arvif;
7341 	struct ieee80211_sta *sta;
7342 	struct ath12k_sta *ahsta;
7343 	struct ath12k_link_sta *arsta;
7344 	int i, ret = 0;
7345 	const void *data = ptr;
7346 
7347 	INIT_LIST_HEAD(&stats.vdevs);
7348 	INIT_LIST_HEAD(&stats.bcn);
7349 	INIT_LIST_HEAD(&stats.pdevs);
7350 
7351 	if (!ev) {
7352 		ath12k_warn(ab, "failed to fetch update stats ev");
7353 		return -EPROTO;
7354 	}
7355 
7356 	rcu_read_lock();
7357 
7358 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
7359 	if (!ar) {
7360 		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
7361 			    le32_to_cpu(ev->pdev_id));
7362 		ret = -EPROTO;
7363 		goto exit;
7364 	}
7365 
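	/* The byte-array TLV packs vdev, bcn and pdev stats records back to
	 * back; the record counts come from the preceding fixed stats event.
	 */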
7366 	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
7367 		const struct wmi_vdev_stats_params *src;
7368 		struct ath12k_fw_stats_vdev *dst;
7369 
7370 		src = data;
7371 		if (len < sizeof(*src)) {
7372 			ret = -EPROTO;
7373 			goto exit;
7374 		}
7375 
7376 		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
7377 		if (arvif) {
7378 			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7379 							   arvif->bssid,
7380 							   NULL);
7381 			if (sta) {
7382 				ahsta = ath12k_sta_to_ahsta(sta);
7383 				arsta = &ahsta->deflink;
7384 				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
7385 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7386 					   "wmi stats vdev id %d snr %d\n",
7387 					   src->vdev_id, src->beacon_snr);
7388 			} else {
7389 				ath12k_dbg(ab, ATH12K_DBG_WMI,
7390 					   "not found station bssid %pM for vdev stat\n",
7391 					   arvif->bssid);
7392 			}
7393 		}
7394 
7395 		data += sizeof(*src);
7396 		len -= sizeof(*src);
7397 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7398 		if (!dst)
7399 			continue;
7400 		ath12k_wmi_pull_vdev_stats(src, dst);
7401 		stats.stats_id = WMI_REQUEST_VDEV_STAT;
7402 		list_add_tail(&dst->list, &stats.vdevs);
7403 	}
7404 	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
7405 		const struct ath12k_wmi_bcn_stats_params *src;
7406 		struct ath12k_fw_stats_bcn *dst;
7407 
7408 		src = data;
7409 		if (len < sizeof(*src)) {
7410 			ret = -EPROTO;
7411 			goto exit;
7412 		}
7413 
7414 		data += sizeof(*src);
7415 		len -= sizeof(*src);
7416 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7417 		if (!dst)
7418 			continue;
7419 		ath12k_wmi_pull_bcn_stats(src, dst);
7420 		stats.stats_id = WMI_REQUEST_BCN_STAT;
7421 		list_add_tail(&dst->list, &stats.bcn);
7422 	}
7423 	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
7424 		const struct ath12k_wmi_pdev_stats_params *src;
7425 		struct ath12k_fw_stats_pdev *dst;
7426 
7427 		src = data;
7428 		if (len < sizeof(*src)) {
7429 			ret = -EPROTO;
7430 			goto exit;
7431 		}
7432 
7433 		stats.stats_id = WMI_REQUEST_PDEV_STAT;
7434 
7435 		data += sizeof(*src);
7436 		len -= sizeof(*src);
7437 
7438 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
7439 		if (!dst)
7440 			continue;
7441 
7442 		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
7443 		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
7444 		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
7445 		list_add_tail(&dst->list, &stats.pdevs);
7446 	}
7447 
7448 	complete(&ar->fw_stats_complete);
7449 	ath12k_debugfs_fw_stats_process(ar, &stats);
7450 exit:
7451 	rcu_read_unlock();
7452 	return ret;
7453 }
7454 
7455 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
7456 					 u16 tag, u16 len,
7457 					 const void *ptr, void *data)
7458 {
7459 	struct wmi_tlv_fw_stats_parse *parse = data;
7460 	int ret = 0;
7461 
7462 	switch (tag) {
7463 	case WMI_TAG_STATS_EVENT:
7464 		parse->ev = ptr;
7465 		break;
7466 	case WMI_TAG_ARRAY_BYTE:
7467 		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
7468 		break;
7469 	default:
7470 		break;
7471 	}
7472 	return ret;
7473 }
7474 
7475 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
7476 {
7477 	int ret;
7478 	struct wmi_tlv_fw_stats_parse parse = {};
7479 
7480 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7481 				  ath12k_wmi_tlv_fw_stats_parse,
7482 				  &parse);
7483 	if (ret)
7484 		ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
7485 }
7486 
7487 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
7488  * is not part of the BDF CTL (conformance test limits) table entries.
7489  */
7490 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
7491 						 struct sk_buff *skb)
7492 {
7493 	const void **tb;
7494 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
7495 	int ret;
7496 
7497 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7498 	if (IS_ERR(tb)) {
7499 		ret = PTR_ERR(tb);
7500 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7501 		return;
7502 	}
7503 
7504 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
7505 	if (!ev) {
7506 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
7507 		kfree(tb);
7508 		return;
7509 	}
7510 
7511 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7512 		   "pdev ctl failsafe check ev status %d\n",
7513 		   ev->ctl_failsafe_status);
7514 
7515 	/* If ctl_failsafe_status is set to 1, FW caps the transmit power at
7516 	 * 10 dBm; otherwise the CTL power entry in the BDF is picked up.
7517 	 */
7518 	if (ev->ctl_failsafe_status != 0)
7519 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
7520 			    ev->ctl_failsafe_status);
7521 
7522 	kfree(tb);
7523 }
7524 
7525 static void
7526 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
7527 					  const struct ath12k_wmi_pdev_csa_event *ev,
7528 					  const u32 *vdev_ids)
7529 {
7530 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
7531 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
7532 	struct ieee80211_bss_conf *conf;
7533 	struct ath12k_link_vif *arvif;
7534 	struct ath12k_vif *ahvif;
7535 	int i;
7536 
7537 	rcu_read_lock();
7538 	for (i = 0; i < num_vdevs; i++) {
7539 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
7540 
7541 		if (!arvif) {
7542 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
7543 				    vdev_ids[i]);
7544 			continue;
7545 		}
7546 		ahvif = arvif->ahvif;
7547 
7548 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
7549 			ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
7550 				    arvif->link_id);
7551 			continue;
7552 		}
7553 
7554 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
7555 		if (!conf) {
7556 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
7557 				    ahvif->vif->addr, arvif->link_id);
7558 			continue;
7559 		}
7560 
7561 		if (!arvif->is_up || !conf->csa_active)
7562 			continue;
7563 
7564 		/* Finish CSA when counter reaches zero */
7565 		if (!current_switch_count) {
7566 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
7567 			arvif->current_cntdown_counter = 0;
7568 		} else if (current_switch_count > 1) {
7569 			/* If the count in event is not what we expect, don't update the
7570 			/* If the count in the event is not what we expect, don't
7571 			 * update the mac80211 count. During beacon Tx failure, the
7572 			 * count in the firmware does not decrement, so this event
7573 			 * can come with the previous count value again.
7574 			if (current_switch_count != arvif->current_cntdown_counter)
7575 				continue;
7576 
7577 			arvif->current_cntdown_counter =
7578 				ieee80211_beacon_update_cntdwn(ahvif->vif,
7579 							       arvif->link_id);
7580 		}
7581 	}
7582 	rcu_read_unlock();
7583 }
7584 
7585 static void
7586 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
7587 					      struct sk_buff *skb)
7588 {
7589 	const void **tb;
7590 	const struct ath12k_wmi_pdev_csa_event *ev;
7591 	const u32 *vdev_ids;
7592 	int ret;
7593 
7594 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7595 	if (IS_ERR(tb)) {
7596 		ret = PTR_ERR(tb);
7597 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7598 		return;
7599 	}
7600 
7601 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
7602 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
7603 
7604 	if (!ev || !vdev_ids) {
7605 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
7606 		kfree(tb);
7607 		return;
7608 	}
7609 
7610 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7611 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
7612 		   ev->current_switch_count, ev->pdev_id,
7613 		   ev->num_vdevs);
7614 
7615 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
7616 
7617 	kfree(tb);
7618 }
7619 
7620 static void
7621 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
7622 {
7623 	const void **tb;
7624 	struct ath12k_mac_get_any_chanctx_conf_arg arg;
7625 	const struct ath12k_wmi_pdev_radar_event *ev;
7626 	struct ath12k *ar;
7627 	int ret;
7628 
7629 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7630 	if (IS_ERR(tb)) {
7631 		ret = PTR_ERR(tb);
7632 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7633 		return;
7634 	}
7635 
7636 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
7637 
7638 	if (!ev) {
7639 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
7640 		kfree(tb);
7641 		return;
7642 	}
7643 
7644 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7645 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
7646 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
7647 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
7648 		   ev->freq_offset, ev->sidx);
7649 
7650 	rcu_read_lock();
7651 
7652 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
7653 
7654 	if (!ar) {
7655 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
7656 			    ev->pdev_id);
7657 		goto exit;
7658 	}
7659 
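	/* Radar is reported against any active channel context on this
	 * pdev, found via the atomic chanctx iterator.
	 */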
7660 	arg.ar = ar;
7661 	arg.chanctx_conf = NULL;
7662 	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
7663 					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
7664 	if (!arg.chanctx_conf) {
7665 		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
7666 		goto exit;
7667 	}
7668 
7669 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
7670 		   ev->pdev_id);
7671 
7672 	if (ar->dfs_block_radar_events)
7673 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
7674 	else
7675 		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
7676 
7677 exit:
7678 	rcu_read_unlock();
7679 
7680 	kfree(tb);
7681 }
7682 
7683 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
7684 					  struct sk_buff *skb)
7685 {
7686 	const struct ath12k_wmi_ftm_event *ev;
7687 	const void **tb;
7688 	int ret;
7689 	u16 length;
7690 
7691 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7692 
7693 	if (IS_ERR(tb)) {
7694 		ret = PTR_ERR(tb);
7695 		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
7696 		return;
7697 	}
7698 
7699 	ev = tb[WMI_TAG_ARRAY_BYTE];
7700 	if (!ev) {
7701 		ath12k_warn(ab, "failed to fetch ftm msg\n");
7702 		kfree(tb);
7703 		return;
7704 	}
7705 
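	/* The FTM event payload follows a single TLV header. */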
7706 	length = skb->len - TLV_HDR_SIZE;
7707 	ath12k_tm_process_event(ab, cmd_id, ev, length);
7708 	kfree(tb);
7709 	tb = NULL;
7710 }
7711 
7712 static void
7713 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
7714 				  struct sk_buff *skb)
7715 {
7716 	struct ath12k *ar;
7717 	struct wmi_pdev_temperature_event ev = {0};
7718 
7719 	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
7720 		ath12k_warn(ab, "failed to extract pdev temperature event");
7721 		return;
7722 	}
7723 
7724 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7725 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
7726 
7727 	rcu_read_lock();
7728 
7729 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
7730 	if (!ar) {
7731 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
7732 		goto exit;
7733 	}
7734 
7735 exit:
7736 	rcu_read_unlock();
7737 }
7738 
7739 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
7740 					struct sk_buff *skb)
7741 {
7742 	const void **tb;
7743 	const struct wmi_fils_discovery_event *ev;
7744 	int ret;
7745 
7746 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7747 	if (IS_ERR(tb)) {
7748 		ret = PTR_ERR(tb);
7749 		ath12k_warn(ab,
7750 			    "failed to parse FILS discovery event tlv %d\n",
7751 			    ret);
7752 		return;
7753 	}
7754 
7755 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
7756 	if (!ev) {
7757 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
7758 		kfree(tb);
7759 		return;
7760 	}
7761 
7762 	ath12k_warn(ab,
7763 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
7764 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
7765 
7766 	kfree(tb);
7767 }
7768 
7769 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
7770 					      struct sk_buff *skb)
7771 {
7772 	const void **tb;
7773 	const struct wmi_probe_resp_tx_status_event *ev;
7774 	int ret;
7775 
7776 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7777 	if (IS_ERR(tb)) {
7778 		ret = PTR_ERR(tb);
7779 		ath12k_warn(ab,
7780 			    "failed to parse probe response transmission status event tlv: %d\n",
7781 			    ret);
7782 		return;
7783 	}
7784 
7785 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
7786 	if (!ev) {
7787 		ath12k_warn(ab,
7788 			    "failed to fetch probe response transmission status event");
7789 		kfree(tb);
7790 		return;
7791 	}
7792 
7793 	if (ev->tx_status)
7794 		ath12k_warn(ab,
7795 			    "Probe response transmission failed for vdev_id %u, status %u\n",
7796 			    ev->vdev_id, ev->tx_status);
7797 
7798 	kfree(tb);
7799 }
7800 
7801 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
7802 				    struct sk_buff *skb)
7803 {
7804 	const void **tb;
7805 	const struct wmi_p2p_noa_event *ev;
7806 	const struct ath12k_wmi_p2p_noa_info *noa;
7807 	struct ath12k *ar;
7808 	int ret, vdev_id;
7809 
7810 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7811 	if (IS_ERR(tb)) {
7812 		ret = PTR_ERR(tb);
7813 		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
7814 		return ret;
7815 	}
7816 
7817 	ev = tb[WMI_TAG_P2P_NOA_EVENT];
7818 	noa = tb[WMI_TAG_P2P_NOA_INFO];
7819 
7820 	if (!ev || !noa) {
7821 		ret = -EPROTO;
7822 		goto out;
7823 	}
7824 
7825 	vdev_id = __le32_to_cpu(ev->vdev_id);
7826 
7827 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7828 		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
7829 		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
7830 
7831 	rcu_read_lock();
7832 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7833 	if (!ar) {
7834 		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
7835 			    vdev_id);
7836 		ret = -EINVAL;
7837 		goto unlock;
7838 	}
7839 
7840 	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
7841 
7842 	ret = 0;
7843 
7844 unlock:
7845 	rcu_read_unlock();
7846 out:
7847 	kfree(tb);
7848 	return ret;
7849 }
7850 
7851 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
7852 					     struct sk_buff *skb)
7853 {
7854 	const struct wmi_rfkill_state_change_event *ev;
7855 	const void **tb;
7856 	int ret;
7857 
7858 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7859 	if (IS_ERR(tb)) {
7860 		ret = PTR_ERR(tb);
7861 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7862 		return;
7863 	}
7864 
7865 	ev = tb[WMI_TAG_RFKILL_EVENT];
7866 	if (!ev) {
7867 		kfree(tb);
7868 		return;
7869 	}
7870 
7871 	ath12k_dbg(ab, ATH12K_DBG_MAC,
7872 		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
7873 		   le32_to_cpu(ev->gpio_pin_num),
7874 		   le32_to_cpu(ev->int_type),
7875 		   le32_to_cpu(ev->radio_state));
7876 
7877 	spin_lock_bh(&ab->base_lock);
7878 	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
7879 	spin_unlock_bh(&ab->base_lock);
7880 
7881 	queue_work(ab->workqueue, &ab->rfkill_work);
7882 	kfree(tb);
7883 }
7884 
7885 static void
7886 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
7887 {
7888 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
7889 }
7890 
7891 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
7892 					struct sk_buff *skb)
7893 {
7894 	const void **tb;
7895 	const struct wmi_twt_enable_event *ev;
7896 	int ret;
7897 
7898 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7899 	if (IS_ERR(tb)) {
7900 		ret = PTR_ERR(tb);
7901 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
7902 			    ret);
7903 		return;
7904 	}
7905 
7906 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
7907 	if (!ev) {
7908 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
7909 		goto exit;
7910 	}
7911 
7912 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
7913 		   le32_to_cpu(ev->pdev_id),
7914 		   le32_to_cpu(ev->status));
7915 
7916 exit:
7917 	kfree(tb);
7918 }
7919 
7920 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
7921 					 struct sk_buff *skb)
7922 {
7923 	const void **tb;
7924 	const struct wmi_twt_disable_event *ev;
7925 	int ret;
7926 
7927 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7928 	if (IS_ERR(tb)) {
7929 		ret = PTR_ERR(tb);
7930 		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
7931 			    ret);
7932 		return;
7933 	}
7934 
7935 	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
7936 	if (!ev) {
7937 		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
7938 		goto exit;
7939 	}
7940 
7941 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
7942 		   le32_to_cpu(ev->pdev_id),
7943 		   le32_to_cpu(ev->status));
7944 
7945 exit:
7946 	kfree(tb);
7947 }
7948 
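/* The WOW wakeup event carries a fixed WMI_TAG_WOW_EVENT_INFO TLV with the
 * wake reason, optionally followed by a WMI_TAG_ARRAY_BYTE TLV. For
 * WOW_REASON_PAGE_FAULT the byte array starts with its own 32-bit length,
 * which is bounds-checked against the TLV length before dumping the data.
 */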
7949 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
7950 					    u16 tag, u16 len,
7951 					    const void *ptr, void *data)
7952 {
7953 	const struct wmi_wow_ev_pg_fault_param *pf_param;
7954 	const struct wmi_wow_ev_param *param;
7955 	struct wmi_wow_ev_arg *arg = data;
7956 	int pf_len;
7957 
7958 	switch (tag) {
7959 	case WMI_TAG_WOW_EVENT_INFO:
7960 		param = ptr;
7961 		arg->wake_reason = le32_to_cpu(param->wake_reason);
7962 		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
7963 			   arg->wake_reason, wow_reason(arg->wake_reason));
7964 		break;
7965 
7966 	case WMI_TAG_ARRAY_BYTE:
7967 		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
7968 			pf_param = ptr;
7969 			pf_len = le32_to_cpu(pf_param->len);
7970 			if (pf_len > len - sizeof(pf_len) ||
7971 			    pf_len < 0) {
7972 				ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
7973 					    pf_len);
7974 				return -EINVAL;
7975 			}
7976 			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
7977 				   pf_len);
7978 			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
7979 					"wow_reason_page_fault packet present",
7980 					"wow_pg_fault ",
7981 					pf_param->data,
7982 					pf_len);
7983 		}
7984 		break;
7985 	default:
7986 		break;
7987 	}
7988 
7989 	return 0;
7990 }
7991 
7992 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
7993 {
7994 	struct wmi_wow_ev_arg arg = { };
7995 	int ret;
7996 
7997 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7998 				  ath12k_wmi_wow_wakeup_host_parse,
7999 				  &arg);
8000 	if (ret) {
8001 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
8002 			    ret);
8003 		return;
8004 	}
8005 
8006 	complete(&ab->wow.wakeup_completed);
8007 }
8008 
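/* Firmware reports the updated GTK replay counter after a rekey handled by
 * the offload engine; the counter is forwarded to userspace (converted to
 * big endian as the supplicant expects) via ieee80211_gtk_rekey_notify().
 */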
8009 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
8010 						struct sk_buff *skb)
8011 {
8012 	const struct wmi_gtk_offload_status_event *ev;
8013 	struct ath12k_link_vif *arvif;
8014 	__be64 replay_ctr_be;
8015 	u64 replay_ctr;
8016 	const void **tb;
8017 	int ret;
8018 
8019 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8020 	if (IS_ERR(tb)) {
8021 		ret = PTR_ERR(tb);
8022 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8023 		return;
8024 	}
8025 
8026 	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
8027 	if (!ev) {
8028 		ath12k_warn(ab, "failed to fetch gtk offload status ev");
8029 		kfree(tb);
8030 		return;
8031 	}
8032 
8033 	rcu_read_lock();
8034 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
8035 	if (!arvif) {
8036 		rcu_read_unlock();
8037 		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
8038 			    le32_to_cpu(ev->vdev_id));
8039 		kfree(tb);
8040 		return;
8041 	}
8042 
8043 	replay_ctr = le64_to_cpu(ev->replay_ctr);
8044 	arvif->rekey_data.replay_ctr = replay_ctr;
8045 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
8046 		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
8047 
8048 	/* supplicant expects big-endian replay counter */
8049 	replay_ctr_be = cpu_to_be64(replay_ctr);
8050 
8051 	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
8052 				   (void *)&replay_ctr_be, GFP_ATOMIC);
8053 
8054 	rcu_read_unlock();
8055 
8056 	kfree(tb);
8057 }
8058 
8059 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
8060 						struct sk_buff *skb)
8061 {
8062 	const struct wmi_mlo_setup_complete_event *ev;
8063 	struct ath12k *ar = NULL;
8064 	struct ath12k_pdev *pdev;
8065 	const void **tb;
8066 	int ret, i;
8067 
8068 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8069 	if (IS_ERR(tb)) {
8070 		ret = PTR_ERR(tb);
8071 		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
8072 			    ret);
8073 		return;
8074 	}
8075 
8076 	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
8077 	if (!ev) {
8078 		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
8079 		kfree(tb);
8080 		return;
8081 	}
8082 
8083 	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
8084 		goto skip_lookup;
8085 
8086 	for (i = 0; i < ab->num_radios; i++) {
8087 		pdev = &ab->pdevs[i];
8088 		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
8089 			ar = pdev->ar;
8090 			break;
8091 		}
8092 	}
8093 
8094 skip_lookup:
8095 	if (!ar) {
8096 		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
8097 			    ev->pdev_id, ev->status);
8098 		goto out;
8099 	}
8100 
8101 	ar->mlo_setup_status = le32_to_cpu(ev->status);
8102 	complete(&ar->mlo_setup_done);
8103 
8104 out:
8105 	kfree(tb);
8106 }
8107 
8108 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
8109 					       struct sk_buff *skb)
8110 {
8111 	const struct wmi_mlo_teardown_complete_event *ev;
8112 	const void **tb;
8113 	int ret;
8114 
8115 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8116 	if (IS_ERR(tb)) {
8117 		ret = PTR_ERR(tb);
8118 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
8119 		return;
8120 	}
8121 
8122 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
8123 	if (!ev) {
8124 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
8125 		kfree(tb);
8126 		return;
8127 	}
8128 
8129 	kfree(tb);
8130 }
8131 
8132 #ifdef CONFIG_ATH12K_DEBUGFS
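/* TPC (transmit power control) stats support, only built with debugfs.
 * Firmware streams the stats as a sequence of halphy ctrl path events;
 * the helpers below copy the fixed params and the variable-length reg
 * power, rates and CTL power arrays into ar->debug.tpc_stats.
 */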
8133 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
8134 					    const void *ptr, u16 tag, u16 len,
8135 					    struct wmi_tpc_stats_arg *tpc_stats)
8136 {
8137 	u32 len1, len2, len3, len4;
8138 	s16 *dst_ptr;
8139 	s8 *dst_ptr_ctl;
8140 
8141 	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
8142 	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
8143 	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
8144 	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
8145 
8146 	switch (tpc_stats->event_count) {
8147 	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
8148 		if (len1 > len)
8149 			return -ENOBUFS;
8150 
8151 		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
8152 			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
8153 			memcpy(dst_ptr, ptr, len1);
8154 		}
8155 		break;
8156 	case ATH12K_TPC_STATS_RATES_EVENT1:
8157 		if (len2 > len)
8158 			return -ENOBUFS;
8159 
8160 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
8161 			dst_ptr = tpc_stats->rates_array1.rate_array;
8162 			memcpy(dst_ptr, ptr, len2);
8163 		}
8164 		break;
8165 	case ATH12K_TPC_STATS_RATES_EVENT2:
8166 		if (len3 > len)
8167 			return -ENOBUFS;
8168 
8169 		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
8170 			dst_ptr = tpc_stats->rates_array2.rate_array;
8171 			memcpy(dst_ptr, ptr, len3);
8172 		}
8173 		break;
8174 	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
8175 		if (len4 > len)
8176 			return -ENOBUFS;
8177 
8178 		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
8179 			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
8180 			memcpy(dst_ptr_ctl, ptr, len4);
8181 		}
8182 		break;
8183 	}
8184 	return 0;
8185 }
8186 
8187 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
8188 				  struct wmi_tpc_stats_arg *tpc_stats,
8189 				  struct wmi_max_reg_power_fixed_params *ev)
8190 {
8191 	struct wmi_max_reg_power_allowed_arg *reg_pwr;
8192 	u32 total_size;
8193 
8194 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8195 		   "Received reg power array type %d length %d for tpc stats\n",
8196 		   ev->reg_power_type, ev->reg_array_len);
8197 
8198 	switch (le32_to_cpu(ev->reg_power_type)) {
8199 	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
8200 		reg_pwr = &tpc_stats->max_reg_allowed_power;
8201 		break;
8202 	default:
8203 		return -EINVAL;
8204 	}
8205 
8206 	/* Each entry is 2 bytes, hence multiply the product of the dimensions by 2 */
8207 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8208 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
8209 	if (le32_to_cpu(ev->reg_array_len) != total_size) {
8210 		ath12k_warn(ab,
8211 			    "Total size and reg_array_len doesn't match for tpc stats\n");
8212 		return -EINVAL;
8213 	}
8214 
8215 	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
8216 
8217 	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
8218 					 GFP_ATOMIC);
8219 	if (!reg_pwr->reg_pwr_array)
8220 		return -ENOMEM;
8221 
8222 	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
8223 
8224 	return 0;
8225 }
8226 
8227 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
8228 				     struct wmi_tpc_stats_arg *tpc_stats,
8229 				     struct wmi_tpc_rates_array_fixed_params *ev)
8230 {
8231 	struct wmi_tpc_rates_array_arg *rates_array;
8232 	u32 flag = 0, rate_array_len;
8233 
8234 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8235 		   "Received rates array type %d length %d for tpc stats\n",
8236 		   ev->rate_array_type, ev->rate_array_len);
8237 
8238 	switch (le32_to_cpu(ev->rate_array_type)) {
8239 	case ATH12K_TPC_STATS_RATES_ARRAY1:
8240 		rates_array = &tpc_stats->rates_array1;
8241 		flag = WMI_TPC_RATES_ARRAY1;
8242 		break;
8243 	case ATH12K_TPC_STATS_RATES_ARRAY2:
8244 		rates_array = &tpc_stats->rates_array2;
8245 		flag = WMI_TPC_RATES_ARRAY2;
8246 		break;
8247 	default:
8248 		ath12k_warn(ab,
8249 			    "Received invalid type of rates array for tpc stats\n");
8250 		return -EINVAL;
8251 	}
8252 	memcpy(&rates_array->tpc_rates_array, ev,
8253 	       sizeof(struct wmi_tpc_rates_array_fixed_params));
8254 	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
8255 	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
8256 	if (!rates_array->rate_array)
8257 		return -ENOMEM;
8258 
8259 	tpc_stats->tlvs_rcvd |= flag;
8260 	return 0;
8261 }
8262 
8263 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
8264 				      struct wmi_tpc_stats_arg *tpc_stats,
8265 				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
8266 {
8267 	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
8268 	u32 total_size, ctl_array_len, flag = 0;
8269 
8270 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8271 		   "Received ctl array type %d length %d for tpc stats\n",
8272 		   ev->ctl_array_type, ev->ctl_array_len);
8273 
8274 	switch (le32_to_cpu(ev->ctl_array_type)) {
8275 	case ATH12K_TPC_STATS_CTL_ARRAY:
8276 		ctl_array = &tpc_stats->ctl_array;
8277 		flag = WMI_TPC_CTL_PWR_ARRAY;
8278 		break;
8279 	default:
8280 		ath12k_warn(ab,
8281 			    "Received invalid type of ctl pwr table for tpc stats\n");
8282 		return -EINVAL;
8283 	}
8284 
8285 	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
8286 		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
8287 	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
8288 		ath12k_warn(ab,
8289 			    "Total size and ctl_array_len doesn't match for tpc stats\n");
8290 		return -EINVAL;
8291 	}
8292 
8293 	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
8294 	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
8295 	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
8296 	if (!ctl_array->ctl_pwr_table)
8297 		return -ENOMEM;
8298 
8299 	tpc_stats->tlvs_rcvd |= flag;
8300 	return 0;
8301 }
8302 
8303 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
8304 					      u16 tag, u16 len,
8305 					      const void *ptr, void *data)
8306 {
8307 	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
8308 	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
8309 	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
8310 	struct wmi_tpc_stats_arg *tpc_stats = data;
8311 	struct wmi_tpc_config_params *tpc_config;
8312 	int ret = 0;
8313 
8314 	if (!tpc_stats) {
8315 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8316 		return -EINVAL;
8317 	}
8318 
8319 	switch (tag) {
8320 	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
8321 		tpc_config = (struct wmi_tpc_config_params *)ptr;
8322 		memcpy(&tpc_stats->tpc_config, tpc_config,
8323 		       sizeof(struct wmi_tpc_config_params));
8324 		break;
8325 	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
8326 		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
8327 		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
8328 		break;
8329 	case WMI_TAG_TPC_STATS_RATES_ARRAY:
8330 		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
8331 		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
8332 		break;
8333 	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
8334 		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
8335 		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
8336 		break;
8337 	default:
8338 		ath12k_warn(ab,
8339 			    "Received invalid tag for tpc stats in subtlvs\n");
8340 		return -EINVAL;
8341 	}
8342 	return ret;
8343 }
8344 
8345 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
8346 					     u16 tag, u16 len,
8347 					     const void *ptr, void *data)
8348 {
8349 	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
8350 	int ret;
8351 
8352 	switch (tag) {
8353 	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
8354 		ret = 0;
8355 		/* Fixed param is already processed */
8356 		break;
8357 	case WMI_TAG_ARRAY_STRUCT:
8358 		/* len 0 is expected for an array of structs when there
8359 		 * is no content of that type to pack inside that tlv
8360 		 */
8361 		if (len == 0)
8362 			return 0;
8363 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
8364 					  ath12k_wmi_tpc_stats_subtlv_parser,
8365 					  tpc_stats);
8366 		break;
8367 	case WMI_TAG_ARRAY_INT16:
8368 		if (len == 0)
8369 			return 0;
8370 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8371 						       WMI_TAG_ARRAY_INT16,
8372 						       len, tpc_stats);
8373 		break;
8374 	case WMI_TAG_ARRAY_BYTE:
8375 		if (len == 0)
8376 			return 0;
8377 		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
8378 						       WMI_TAG_ARRAY_BYTE,
8379 						       len, tpc_stats);
8380 		break;
8381 	default:
8382 		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
8383 		ret = -EINVAL;
8384 		break;
8385 	}
8386 	return ret;
8387 }
8388 
8389 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
8390 {
8391 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
8392 
8393 	lockdep_assert_held(&ar->data_lock);
8394 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
8395 	if (tpc_stats) {
8396 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
8397 		kfree(tpc_stats->rates_array1.rate_array);
8398 		kfree(tpc_stats->rates_array2.rate_array);
8399 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
8400 		kfree(tpc_stats);
8401 		ar->debug.tpc_stats = NULL;
8402 	}
8403 }
8404 
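/* Entry point for WMI_HALPHY_STATS_CTRL_PATH_EVENTID. The first event of a
 * burst (event_count == 0) allocates ar->debug.tpc_stats; subsequent events
 * must arrive strictly in order (previous event_count + 1), and the waiter
 * on ar->debug.tpc_complete is signalled once end_of_event is set.
 */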
8405 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8406 					 struct sk_buff *skb)
8407 {
8408 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
8409 	struct wmi_tpc_stats_arg *tpc_stats;
8410 	const struct wmi_tlv *tlv;
8411 	void *ptr = skb->data;
8412 	struct ath12k *ar;
8413 	u16 tlv_tag;
8414 	u32 event_count;
8415 	int ret;
8416 
8417 	if (!skb->data) {
8418 		ath12k_warn(ab, "No data present in tpc stats event\n");
8419 		return;
8420 	}
8421 
8422 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
8423 		ath12k_warn(ab, "TPC stats event size invalid\n");
8424 		return;
8425 	}
8426 
8427 	tlv = (struct wmi_tlv *)ptr;
8428 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
8429 	ptr += sizeof(*tlv);
8430 
8431 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
8432 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
8433 		return;
8434 	}
8435 
8436 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
8437 	rcu_read_lock();
8438 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
8439 	if (!ar) {
8440 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
8441 		rcu_read_unlock();
8442 		return;
8443 	}
8444 	spin_lock_bh(&ar->data_lock);
8445 	if (!ar->debug.tpc_request) {
8446 		/* Event received either without a request or after the
8447 		 * timeout; if memory is already allocated, free it
8448 		 */
8449 		if (ar->debug.tpc_stats) {
8450 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
8451 			ath12k_wmi_free_tpc_stats_mem(ar);
8452 		}
8453 		goto unlock;
8454 	}
8455 
8456 	event_count = le32_to_cpu(fixed_param->event_count);
8457 	if (event_count == 0) {
8458 		if (ar->debug.tpc_stats) {
8459 			ath12k_warn(ab,
8460 				    "Invalid tpc memory present\n");
8461 			goto unlock;
8462 		}
8463 		ar->debug.tpc_stats =
8464 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
8465 				GFP_ATOMIC);
8466 		if (!ar->debug.tpc_stats) {
8467 			ath12k_warn(ab,
8468 				    "Failed to allocate memory for tpc stats\n");
8469 			goto unlock;
8470 		}
8471 	}
8472 
8473 	tpc_stats = ar->debug.tpc_stats;
8474 	if (!tpc_stats) {
8475 		ath12k_warn(ab, "tpc stats memory unavailable\n");
8476 		goto unlock;
8477 	}
8478 
8479 	if (event_count != 0) {
8480 		if (event_count != tpc_stats->event_count + 1) {
8481 			ath12k_warn(ab,
8482 				    "Invalid tpc event received\n");
8483 			goto unlock;
8484 		}
8485 	}
8486 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
8487 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
8488 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
8489 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8490 		   "tpc stats event_count %d\n",
8491 		   tpc_stats->event_count);
8492 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8493 				  ath12k_wmi_tpc_stats_event_parser,
8494 				  tpc_stats);
8495 	if (ret) {
8496 		ath12k_wmi_free_tpc_stats_mem(ar);
8497 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
8498 		goto unlock;
8499 	}
8500 
8501 	if (tpc_stats->end_of_event)
8502 		complete(&ar->debug.tpc_complete);
8503 
8504 unlock:
8505 	spin_unlock_bh(&ar->data_lock);
8506 	rcu_read_unlock();
8507 }
8508 #else
8509 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
8510 					 struct sk_buff *skb)
8511 {
8512 }
8513 #endif
8514 
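/* Central WMI event dispatcher, called from the HTC RX completion path.
 * Every handler here consumes the event synchronously and the skb is freed
 * at the end, with the exception of WMI_MGMT_RX_EVENTID, whose handler
 * takes over skb ownership.
 */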
8515 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
8516 {
8517 	struct wmi_cmd_hdr *cmd_hdr;
8518 	enum wmi_tlv_event_id id;
8519 
8520 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
8521 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
8522 
8523 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
8524 		goto out;
8525 
8526 	switch (id) {
8527 		/* Process all the WMI events here */
8528 	case WMI_SERVICE_READY_EVENTID:
8529 		ath12k_service_ready_event(ab, skb);
8530 		break;
8531 	case WMI_SERVICE_READY_EXT_EVENTID:
8532 		ath12k_service_ready_ext_event(ab, skb);
8533 		break;
8534 	case WMI_SERVICE_READY_EXT2_EVENTID:
8535 		ath12k_service_ready_ext2_event(ab, skb);
8536 		break;
8537 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
8538 		ath12k_reg_chan_list_event(ab, skb);
8539 		break;
8540 	case WMI_READY_EVENTID:
8541 		ath12k_ready_event(ab, skb);
8542 		break;
8543 	case WMI_PEER_DELETE_RESP_EVENTID:
8544 		ath12k_peer_delete_resp_event(ab, skb);
8545 		break;
8546 	case WMI_VDEV_START_RESP_EVENTID:
8547 		ath12k_vdev_start_resp_event(ab, skb);
8548 		break;
8549 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
8550 		ath12k_bcn_tx_status_event(ab, skb);
8551 		break;
8552 	case WMI_VDEV_STOPPED_EVENTID:
8553 		ath12k_vdev_stopped_event(ab, skb);
8554 		break;
8555 	case WMI_MGMT_RX_EVENTID:
8556 		ath12k_mgmt_rx_event(ab, skb);
8557 		/* mgmt_rx_event() owns the skb now! */
8558 		return;
8559 	case WMI_MGMT_TX_COMPLETION_EVENTID:
8560 		ath12k_mgmt_tx_compl_event(ab, skb);
8561 		break;
8562 	case WMI_SCAN_EVENTID:
8563 		ath12k_scan_event(ab, skb);
8564 		break;
8565 	case WMI_PEER_STA_KICKOUT_EVENTID:
8566 		ath12k_peer_sta_kickout_event(ab, skb);
8567 		break;
8568 	case WMI_ROAM_EVENTID:
8569 		ath12k_roam_event(ab, skb);
8570 		break;
8571 	case WMI_CHAN_INFO_EVENTID:
8572 		ath12k_chan_info_event(ab, skb);
8573 		break;
8574 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
8575 		ath12k_pdev_bss_chan_info_event(ab, skb);
8576 		break;
8577 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
8578 		ath12k_vdev_install_key_compl_event(ab, skb);
8579 		break;
8580 	case WMI_SERVICE_AVAILABLE_EVENTID:
8581 		ath12k_service_available_event(ab, skb);
8582 		break;
8583 	case WMI_PEER_ASSOC_CONF_EVENTID:
8584 		ath12k_peer_assoc_conf_event(ab, skb);
8585 		break;
8586 	case WMI_UPDATE_STATS_EVENTID:
8587 		ath12k_update_stats_event(ab, skb);
8588 		break;
8589 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
8590 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
8591 		break;
8592 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
8593 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
8594 		break;
8595 	case WMI_PDEV_TEMPERATURE_EVENTID:
8596 		ath12k_wmi_pdev_temperature_event(ab, skb);
8597 		break;
8598 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
8599 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
8600 		break;
8601 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
8602 		ath12k_fils_discovery_event(ab, skb);
8603 		break;
8604 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
8605 		ath12k_probe_resp_tx_status_event(ab, skb);
8606 		break;
8607 	case WMI_RFKILL_STATE_CHANGE_EVENTID:
8608 		ath12k_rfkill_state_change_event(ab, skb);
8609 		break;
8610 	case WMI_TWT_ENABLE_EVENTID:
8611 		ath12k_wmi_twt_enable_event(ab, skb);
8612 		break;
8613 	case WMI_TWT_DISABLE_EVENTID:
8614 		ath12k_wmi_twt_disable_event(ab, skb);
8615 		break;
8616 	case WMI_P2P_NOA_EVENTID:
8617 		ath12k_wmi_p2p_noa_event(ab, skb);
8618 		break;
8619 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
8620 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
8621 		break;
8622 	case WMI_VDEV_DELETE_RESP_EVENTID:
8623 		ath12k_vdev_delete_resp_event(ab, skb);
8624 		break;
8625 	case WMI_DIAG_EVENTID:
8626 		ath12k_wmi_diag_event(ab, skb);
8627 		break;
8628 	case WMI_WOW_WAKEUP_HOST_EVENTID:
8629 		ath12k_wmi_event_wow_wakeup_host(ab, skb);
8630 		break;
8631 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
8632 		ath12k_wmi_gtk_offload_status_event(ab, skb);
8633 		break;
8634 	case WMI_MLO_SETUP_COMPLETE_EVENTID:
8635 		ath12k_wmi_event_mlo_setup_complete(ab, skb);
8636 		break;
8637 	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
8638 		ath12k_wmi_event_teardown_complete(ab, skb);
8639 		break;
8640 	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
8641 		ath12k_wmi_process_tpc_stats(ab, skb);
8642 		break;
8643 	/* add Unsupported events (rare) here */
8644 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
8645 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
8646 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
8647 		ath12k_dbg(ab, ATH12K_DBG_WMI,
8648 			   "ignoring unsupported event 0x%x\n", id);
8649 		break;
8650 	/* add Unsupported events (frequent) here */
8651 	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
8652 	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
8653 	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
8654 		/* debug might flood hence silently ignore (no-op) */
8655 		break;
8656 	case WMI_PDEV_UTF_EVENTID:
8657 		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
8658 			ath12k_tm_wmi_event_segmented(ab, id, skb);
8659 		else
8660 			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
8661 		break;
8662 	default:
8663 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
8664 		break;
8665 	}
8666 
8667 out:
8668 	dev_kfree_skb(skb);
8669 }
8670 
8671 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
8672 					   u32 pdev_idx)
8673 {
8674 	int status;
8675 	static const u32 svc_id[] = {
8676 		ATH12K_HTC_SVC_ID_WMI_CONTROL,
8677 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
8678 		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
8679 	};
8680 	struct ath12k_htc_svc_conn_req conn_req = {};
8681 	struct ath12k_htc_svc_conn_resp conn_resp = {};
8682 
8683 	/* these fields are the same for all service endpoints */
8684 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
8685 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
8686 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
8687 
8688 	/* connect to control service */
8689 	conn_req.service_id = svc_id[pdev_idx];
8690 
8691 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
8692 	if (status) {
8693 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
8694 			    status);
8695 		return status;
8696 	}
8697 
8698 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
8699 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
8700 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
8701 
8702 	return 0;
8703 }
8704 
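/* Build and send WMI_UNIT_TEST_CMDID: the fixed params copied from ut_cmd
 * are followed by a WMI_TAG_ARRAY_UINT32 TLV holding num_args test
 * arguments.
 */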
8705 static int
8706 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
8707 			      struct wmi_unit_test_cmd ut_cmd,
8708 			      u32 *test_args)
8709 {
8710 	struct ath12k_wmi_pdev *wmi = ar->wmi;
8711 	struct wmi_unit_test_cmd *cmd;
8712 	struct sk_buff *skb;
8713 	struct wmi_tlv *tlv;
8714 	void *ptr;
8715 	u32 *ut_cmd_args;
8716 	int buf_len, arg_len;
8717 	int ret;
8718 	int i;
8719 
8720 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
8721 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
8722 
8723 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
8724 	if (!skb)
8725 		return -ENOMEM;
8726 
8727 	cmd = (struct wmi_unit_test_cmd *)skb->data;
8728 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
8729 						 sizeof(ut_cmd));
8730 
8731 	cmd->vdev_id = ut_cmd.vdev_id;
8732 	cmd->module_id = ut_cmd.module_id;
8733 	cmd->num_args = ut_cmd.num_args;
8734 	cmd->diag_token = ut_cmd.diag_token;
8735 
8736 	ptr = skb->data + sizeof(ut_cmd);
8737 
8738 	tlv = ptr;
8739 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
8740 
8741 	ptr += TLV_HDR_SIZE;
8742 
8743 	ut_cmd_args = ptr;
8744 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
8745 		ut_cmd_args[i] = test_args[i];
8746 
8747 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8748 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
8749 		   cmd->module_id, cmd->vdev_id, cmd->num_args,
8750 		   cmd->diag_token);
8751 
8752 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
8753 
8754 	if (ret) {
8755 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
8756 			    ret);
8757 		dev_kfree_skb(skb);
8758 	}
8759 
8760 	return ret;
8761 }
8762 
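/* Simulate a radar pulse via the DFS unit test module. This requires a
 * started AP vdev on the radio; the radar parameters (segment id, chirp,
 * frequency offset) are packed into a single test argument, zero here.
 */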
8763 int ath12k_wmi_simulate_radar(struct ath12k *ar)
8764 {
8765 	struct ath12k_link_vif *arvif;
8766 	u32 dfs_args[DFS_MAX_TEST_ARGS];
8767 	struct wmi_unit_test_cmd wmi_ut;
8768 	bool arvif_found = false;
8769 
8770 	list_for_each_entry(arvif, &ar->arvifs, list) {
8771 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
8772 			arvif_found = true;
8773 			break;
8774 		}
8775 	}
8776 
8777 	if (!arvif_found)
8778 		return -EINVAL;
8779 
8780 	dfs_args[DFS_TEST_CMDID] = 0;
8781 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
8782 	/* Currently we can pass segment_id (b0 - b1), chirp (b2) and
8783 	 * freq offset (b3 - b10) to the unit test. For simulation
8784 	 * purposes this can be set to 0, which is valid.
8785 	 */
8786 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
8787 
8788 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
8789 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
8790 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
8791 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
8792 
8793 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
8794 
8795 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
8796 }
8797 
8798 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
8799 				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
8800 {
8801 	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
8802 	struct ath12k_wmi_pdev *wmi = ar->wmi;
8803 	struct sk_buff *skb;
8804 	struct wmi_tlv *tlv;
8805 	__le32 *pdev_id;
8806 	u32 buf_len;
8807 	void *ptr;
8808 	int ret;
8809 
8810 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
8811 
8812 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
8813 	if (!skb)
8814 		return -ENOMEM;
8815 	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
8816 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
8817 						 sizeof(*cmd));
8818 
8819 	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
8820 	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
8821 	cmd->subid = cpu_to_le32(tpc_stats_type);
8822 
8823 	ptr = skb->data + sizeof(*cmd);
8824 
8825 	/* The TLV arrays below optionally follow this fixed param TLV structure
8826 	 * 1. ARRAY_UINT32 pdev_ids[]
8827 	 *      If this array is present and non-zero length, stats should only
8828 	 *      be provided from the pdevs identified in the array.
8829 	 * 2. ARRAY_UINT32 vdev_ids[]
8830 	 *      If this array is present and non-zero length, stats should only
8831 	 *      be provided from the vdevs identified in the array.
8832 	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
8833 	 *      If this array is present and non-zero length, stats should only
8834 	 *      be provided from the peers with the MAC addresses specified
8835 	 *      in the array
8836 	 */
8837 	tlv = ptr;
8838 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
8839 	ptr += TLV_HDR_SIZE;
8840 
8841 	pdev_id = ptr;
8842 	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
8843 	ptr += sizeof(*pdev_id);
8844 
8845 	tlv = ptr;
8846 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
8847 	ptr += TLV_HDR_SIZE;
8848 
8849 	tlv = ptr;
8850 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
8851 	ptr += TLV_HDR_SIZE;
8852 
8853 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
8854 	if (ret) {
8855 		ath12k_warn(ar->ab,
8856 			    "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
8857 		dev_kfree_skb(skb);
8858 		return ret;
8859 	}
8860 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
8861 		   ar->pdev->pdev_id);
8862 
8863 	return ret;
8864 }
8865 
8866 int ath12k_wmi_connect(struct ath12k_base *ab)
8867 {
8868 	u32 i;
8869 	u8 wmi_ep_count;
8870 
8871 	wmi_ep_count = ab->htc.wmi_ep_count;
8872 	if (wmi_ep_count > ab->hw_params->max_radios)
8873 		return -EINVAL;
8874 
8875 	for (i = 0; i < wmi_ep_count; i++)
8876 		ath12k_connect_pdev_htc_service(ab, i);
8877 
8878 	return 0;
8879 }
8880 
8881 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
8882 {
8883 	if (WARN_ON(pdev_id >= MAX_RADIOS))
8884 		return;
8885 
8886 	/* TODO: Deinit any pdev specific wmi resource */
8887 }
8888 
8889 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
8890 			   u8 pdev_id)
8891 {
8892 	struct ath12k_wmi_pdev *wmi_handle;
8893 
8894 	if (pdev_id >= ab->hw_params->max_radios)
8895 		return -EINVAL;
8896 
8897 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
8898 
8899 	wmi_handle->wmi_ab = &ab->wmi_ab;
8900 
8901 	ab->wmi_ab.ab = ab;
8902 	/* TODO: Init remaining resource specific to pdev */
8903 
8904 	return 0;
8905 }
8906 
8907 int ath12k_wmi_attach(struct ath12k_base *ab)
8908 {
8909 	int ret;
8910 
8911 	ret = ath12k_wmi_pdev_attach(ab, 0);
8912 	if (ret)
8913 		return ret;
8914 
8915 	ab->wmi_ab.ab = ab;
8916 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
8917 
8918 	/* It's overwritten when service_ext_ready is handled */
8919 	if (ab->hw_params->single_pdev_only)
8920 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
8921 
8922 	/* TODO: Init remaining wmi soc resources required */
8923 	init_completion(&ab->wmi_ab.service_ready);
8924 	init_completion(&ab->wmi_ab.unified_ready);
8925 
8926 	return 0;
8927 }
8928 
8929 void ath12k_wmi_detach(struct ath12k_base *ab)
8930 {
8931 	int i;
8932 
8933 	/* TODO: Deinit wmi resource specific to SOC as required */
8934 
8935 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
8936 		ath12k_wmi_pdev_detach(ab, i);
8937 
8938 	ath12k_wmi_free_dbring_caps(ab);
8939 }
8940 
8941 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
8942 {
8943 	struct wmi_hw_data_filter_cmd *cmd;
8944 	struct sk_buff *skb;
8945 	int len;
8946 
8947 	len = sizeof(*cmd);
8948 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8949 
8950 	if (!skb)
8951 		return -ENOMEM;
8952 
8953 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
8954 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
8955 						 sizeof(*cmd));
8956 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
8957 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
8958 
8959 	/* Set all modes in case of disable */
8960 	if (arg->enable)
8961 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
8962 	else
8963 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
8964 
8965 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8966 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
8967 		   arg->enable, arg->hw_filter_bitmap);
8968 
8969 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
8970 }
8971 
8972 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
8973 {
8974 	struct wmi_wow_host_wakeup_cmd *cmd;
8975 	struct sk_buff *skb;
8976 	size_t len;
8977 
8978 	len = sizeof(*cmd);
8979 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8980 	if (!skb)
8981 		return -ENOMEM;
8982 
8983 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
8984 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
8985 						 sizeof(*cmd));
8986 
8987 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
8988 
8989 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
8990 }
8991 
8992 int ath12k_wmi_wow_enable(struct ath12k *ar)
8993 {
8994 	struct wmi_wow_enable_cmd *cmd;
8995 	struct sk_buff *skb;
8996 	int len;
8997 
8998 	len = sizeof(*cmd);
8999 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9000 	if (!skb)
9001 		return -ENOMEM;
9002 
9003 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
9004 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
9005 						 sizeof(*cmd));
9006 
9007 	cmd->enable = cpu_to_le32(1);
9008 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
9009 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
9010 
9011 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
9012 }
9013 
9014 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
9015 				    enum wmi_wow_wakeup_event event,
9016 				    u32 enable)
9017 {
9018 	struct wmi_wow_add_del_event_cmd *cmd;
9019 	struct sk_buff *skb;
9020 	size_t len;
9021 
9022 	len = sizeof(*cmd);
9023 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9024 	if (!skb)
9025 		return -ENOMEM;
9026 
9027 	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
9028 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
9029 						 sizeof(*cmd));
9030 	cmd->vdev_id = cpu_to_le32(vdev_id);
9031 	cmd->is_add = cpu_to_le32(enable);
9032 	cmd->event_bitmap = cpu_to_le32((1 << event));
9033 
9034 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
9035 		   wow_wakeup_event(event), enable, vdev_id);
9036 
9037 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
9038 }
9039 
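/* Install a WOW bitmap wake pattern. The command layout carries one TLV
 * array per pattern type; only the bitmap entry is filled here, while the
 * ipv4/ipv6 sync, magic and pattern-info-timeout arrays are sent empty,
 * followed by a single-u32 ratelimit interval TLV.
 */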
9040 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
9041 			       const u8 *pattern, const u8 *mask,
9042 			       int pattern_len, int pattern_offset)
9043 {
9044 	struct wmi_wow_add_pattern_cmd *cmd;
9045 	struct wmi_wow_bitmap_pattern_params *bitmap;
9046 	struct wmi_tlv *tlv;
9047 	struct sk_buff *skb;
9048 	void *ptr;
9049 	size_t len;
9050 
9051 	len = sizeof(*cmd) +
9052 	      sizeof(*tlv) +			/* array struct */
9053 	      sizeof(*bitmap) +			/* bitmap */
9054 	      sizeof(*tlv) +			/* empty ipv4 sync */
9055 	      sizeof(*tlv) +			/* empty ipv6 sync */
9056 	      sizeof(*tlv) +			/* empty magic */
9057 	      sizeof(*tlv) +			/* empty info timeout */
9058 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
9059 
9060 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9061 	if (!skb)
9062 		return -ENOMEM;
9063 
9064 	/* cmd */
9065 	ptr = skb->data;
9066 	cmd = ptr;
9067 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
9068 						 sizeof(*cmd));
9069 	cmd->vdev_id = cpu_to_le32(vdev_id);
9070 	cmd->pattern_id = cpu_to_le32(pattern_id);
9071 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9072 
9073 	ptr += sizeof(*cmd);
9074 
9075 	/* bitmap */
9076 	tlv = ptr;
9077 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
9078 
9079 	ptr += sizeof(*tlv);
9080 
9081 	bitmap = ptr;
9082 	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
9083 						    sizeof(*bitmap));
9084 	memcpy(bitmap->patternbuf, pattern, pattern_len);
9085 	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
9086 	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
9087 	bitmap->pattern_len = cpu_to_le32(pattern_len);
9088 	bitmap->bitmask_len = cpu_to_le32(pattern_len);
9089 	bitmap->pattern_id = cpu_to_le32(pattern_id);
9090 
9091 	ptr += sizeof(*bitmap);
9092 
9093 	/* ipv4 sync */
9094 	tlv = ptr;
9095 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9096 
9097 	ptr += sizeof(*tlv);
9098 
9099 	/* ipv6 sync */
9100 	tlv = ptr;
9101 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9102 
9103 	ptr += sizeof(*tlv);
9104 
9105 	/* magic */
9106 	tlv = ptr;
9107 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
9108 
9109 	ptr += sizeof(*tlv);
9110 
9111 	/* pattern info timeout */
9112 	tlv = ptr;
9113 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
9114 
9115 	ptr += sizeof(*tlv);
9116 
9117 	/* ratelimit interval */
9118 	tlv = ptr;
9119 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
9120 
9121 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
9122 		   vdev_id, pattern_id, pattern_offset, pattern_len);
9123 
9124 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
9125 			bitmap->patternbuf, pattern_len);
9126 	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
9127 			bitmap->bitmaskbuf, pattern_len);
9128 
9129 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
9130 }
9131 
9132 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
9133 {
9134 	struct wmi_wow_del_pattern_cmd *cmd;
9135 	struct sk_buff *skb;
9136 	size_t len;
9137 
9138 	len = sizeof(*cmd);
9139 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9140 	if (!skb)
9141 		return -ENOMEM;
9142 
9143 	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
9144 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
9145 						 sizeof(*cmd));
9146 	cmd->vdev_id = cpu_to_le32(vdev_id);
9147 	cmd->pattern_id = cpu_to_le32(pattern_id);
9148 	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
9149 
9150 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
9151 		   vdev_id, pattern_id);
9152 
9153 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
9154 }
9155 
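/* Build the NLO (network list offload) start command for PNO scans: the
 * fixed params are followed by an array-of-struct TLV with one
 * nlo_configured_params entry per configured SSID and a uint32 TLV with
 * the channel list of the first network.
 */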
9156 static struct sk_buff *
9157 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
9158 				   struct wmi_pno_scan_req_arg *pno)
9159 {
9160 	struct nlo_configured_params *nlo_list;
9161 	size_t len, nlo_list_len, channel_list_len;
9162 	struct wmi_wow_nlo_config_cmd *cmd;
9163 	__le32 *channel_list;
9164 	struct wmi_tlv *tlv;
9165 	struct sk_buff *skb;
9166 	void *ptr;
9167 	u32 i;
9168 
9169 	len = sizeof(*cmd) +
9170 	      sizeof(*tlv) +
9171 	      /* TLV place holder for array of structures
9172 	       * nlo_configured_params(nlo_list)
9173 	       */
9174 	      sizeof(*tlv);
9175 	      /* TLV place holder for array of uint32 channel_list */
9176 
9177 	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
9178 	len += channel_list_len;
9179 
9180 	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
9181 	len += nlo_list_len;
9182 
9183 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9184 	if (!skb)
9185 		return ERR_PTR(-ENOMEM);
9186 
9187 	ptr = skb->data;
9188 	cmd = ptr;
9189 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
9190 
9191 	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
9192 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
9193 
9194 	/* current FW does not support min-max range for dwell time */
9195 	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
9196 	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
9197 
9198 	if (pno->do_passive_scan)
9199 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
9200 
9201 	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
9202 	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
9203 	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
9204 	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
9205 
9206 	if (pno->enable_pno_scan_randomization) {
9207 		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
9208 					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
9209 		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
9210 		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
9211 	}
9212 
9213 	ptr += sizeof(*cmd);
9214 
9215 	/* nlo_configured_params(nlo_list) */
9216 	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
9217 	tlv = ptr;
9218 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
9219 
9220 	ptr += sizeof(*tlv);
9221 	nlo_list = ptr;
9222 	for (i = 0; i < pno->uc_networks_count; i++) {
9223 		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
9224 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
9225 						     sizeof(*nlo_list));
9226 
9227 		nlo_list[i].ssid.valid = cpu_to_le32(1);
9228 		nlo_list[i].ssid.ssid.ssid_len =
9229 			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
9230 		memcpy(nlo_list[i].ssid.ssid.ssid,
9231 		       pno->a_networks[i].ssid.ssid,
9232 		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
9233 
9234 		if (pno->a_networks[i].rssi_threshold &&
9235 		    pno->a_networks[i].rssi_threshold > -300) {
9236 			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
9237 			nlo_list[i].rssi_cond.rssi =
9238 					cpu_to_le32(pno->a_networks[i].rssi_threshold);
9239 		}
9240 
9241 		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
9242 		nlo_list[i].bcast_nw_type.bcast_nw_type =
9243 					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
9244 	}
9245 
9246 	ptr += nlo_list_len;
9247 	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
9248 	tlv = ptr;
9249 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
9250 	ptr += sizeof(*tlv);
9251 	channel_list = ptr;
9252 
9253 	for (i = 0; i < pno->a_networks[0].channel_count; i++)
9254 		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
9255 
9256 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
9257 		   vdev_id);
9258 
9259 	return skb;
9260 }
9261 
9262 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
9263 							 u32 vdev_id)
9264 {
9265 	struct wmi_wow_nlo_config_cmd *cmd;
9266 	struct sk_buff *skb;
9267 	size_t len;
9268 
9269 	len = sizeof(*cmd);
9270 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9271 	if (!skb)
9272 		return ERR_PTR(-ENOMEM);
9273 
9274 	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
9275 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
9276 
9277 	cmd->vdev_id = cpu_to_le32(vdev_id);
9278 	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
9279 
9280 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9281 		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
9282 	return skb;
9283 }
9284 
9285 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
9286 			      struct wmi_pno_scan_req_arg  *pno_scan)
9287 {
9288 	struct sk_buff *skb;
9289 
9290 	if (pno_scan->enable)
9291 		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
9292 	else
9293 		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
9294 
9295 	if (IS_ERR_OR_NULL(skb))
9296 		return -ENOMEM;
9297 
9298 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
9299 }
9300 
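/* Fill NS offload tuples. The command format caps the first array at
 * WMI_MAX_NS_OFFLOADS entries; when more IPv6 addresses are configured,
 * the remainder goes into a second, extension array appended after the
 * ARP tuples (see ath12k_wmi_arp_ns_offload() below).
 */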
9301 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
9302 				       struct wmi_arp_ns_offload_arg *offload,
9303 				       void **ptr,
9304 				       bool enable,
9305 				       bool ext)
9306 {
9307 	struct wmi_ns_offload_params *ns;
9308 	struct wmi_tlv *tlv;
9309 	void *buf_ptr = *ptr;
9310 	u32 ns_cnt, ns_ext_tuples;
9311 	int i, max_offloads;
9312 
9313 	ns_cnt = offload->ipv6_count;
9314 
9315 	tlv  = buf_ptr;
9316 
9317 	if (ext) {
9318 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
9319 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9320 						 ns_ext_tuples * sizeof(*ns));
9321 		i = WMI_MAX_NS_OFFLOADS;
9322 		max_offloads = offload->ipv6_count;
9323 	} else {
9324 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9325 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
9326 		i = 0;
9327 		max_offloads = WMI_MAX_NS_OFFLOADS;
9328 	}
9329 
9330 	buf_ptr += sizeof(*tlv);
9331 
9332 	for (; i < max_offloads; i++) {
9333 		ns = buf_ptr;
9334 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
9335 							sizeof(*ns));
9336 
9337 		if (enable) {
9338 			if (i < ns_cnt)
9339 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
9340 
9341 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
9342 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
9343 
9344 			if (offload->ipv6_type[i])
9345 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
9346 
9347 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
9348 
9349 			if (!is_zero_ether_addr(ns->target_mac.addr))
9350 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
9351 
9352 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9353 				   "wmi index %d ns_solicited %pI6 target %pI6",
9354 				   i, ns->solicitation_ipaddr,
9355 				   ns->target_ipaddr[0]);
9356 		}
9357 
9358 		buf_ptr += sizeof(*ns);
9359 	}
9360 
9361 	*ptr = buf_ptr;
9362 }
9363 
9364 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
9365 					struct wmi_arp_ns_offload_arg *offload,
9366 					void **ptr,
9367 					bool enable)
9368 {
9369 	struct wmi_arp_offload_params *arp;
9370 	struct wmi_tlv *tlv;
9371 	void *buf_ptr = *ptr;
9372 	int i;
9373 
9374 	/* fill arp tuple */
9375 	tlv = buf_ptr;
9376 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
9377 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
9378 	buf_ptr += sizeof(*tlv);
9379 
9380 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
9381 		arp = buf_ptr;
9382 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
9383 							 sizeof(*arp));
9384 
9385 		if (enable && i < offload->ipv4_count) {
9386 			/* Copy the target ip addr and flags */
9387 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
9388 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
9389 
9390 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
9391 				   arp->target_ipaddr);
9392 		}
9393 
9394 		buf_ptr += sizeof(*arp);
9395 	}
9396 
9397 	*ptr = buf_ptr;
9398 }
9399 
9400 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
9401 			      struct ath12k_link_vif *arvif,
9402 			      struct wmi_arp_ns_offload_arg *offload,
9403 			      bool enable)
9404 {
9405 	struct wmi_set_arp_ns_offload_cmd *cmd;
9406 	struct wmi_tlv *tlv;
9407 	struct sk_buff *skb;
9408 	void *buf_ptr;
9409 	size_t len;
9410 	u8 ns_cnt, ns_ext_tuples = 0;
9411 
9412 	ns_cnt = offload->ipv6_count;
9413 
9414 	len = sizeof(*cmd) +
9415 	      sizeof(*tlv) +
9416 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
9417 	      sizeof(*tlv) +
9418 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
9419 
9420 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
9421 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
9422 		len += sizeof(*tlv) +
9423 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
9424 	}
9425 
9426 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9427 	if (!skb)
9428 		return -ENOMEM;
9429 
9430 	buf_ptr = skb->data;
9431 	cmd = buf_ptr;
9432 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
9433 						 sizeof(*cmd));
9434 	cmd->flags = cpu_to_le32(0);
9435 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9436 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
9437 
9438 	buf_ptr += sizeof(*cmd);
9439 
9440 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
9441 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
9442 
9443 	if (ns_ext_tuples)
9444 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
9445 
9446 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
9447 }
9448 
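/* Hand the GTK rekey material (KCK, KEK and the last seen replay
 * counter) to firmware so it can complete group key rekeys on its own,
 * e.g. while the host is suspended; disable the offload otherwise.
 */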
9449 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
9450 				 struct ath12k_link_vif *arvif, bool enable)
9451 {
9452 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
9453 	struct wmi_gtk_rekey_offload_cmd *cmd;
9454 	struct sk_buff *skb;
9455 	__le64 replay_ctr;
9456 	int len;
9457 
9458 	len = sizeof(*cmd);
9459 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9460 	if (!skb)
9461 		return -ENOMEM;
9462 
9463 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9464 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9465 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9466 
9467 	if (enable) {
9468 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
9469 
9470 		/* the key lengths in rekey_data and cmd are equal */
9471 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
9472 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
9473 
9474 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
9475 		memcpy(cmd->replay_ctr, &replay_ctr,
9476 		       sizeof(replay_ctr));
9477 	} else {
9478 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
9479 	}
9480 
9481 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
9482 		   arvif->vdev_id, enable);
9483 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9484 }
9485 
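/* Ask firmware for the current GTK rekey offload state; the result,
 * including the updated replay counter, is reported back asynchronously
 * via a WMI GTK offload status event.
 */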
9486 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
9487 				 struct ath12k_link_vif *arvif)
9488 {
9489 	struct wmi_gtk_rekey_offload_cmd *cmd;
9490 	struct sk_buff *skb;
9491 	int len;
9492 
9493 	len = sizeof(*cmd);
9494 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
9495 	if (!skb)
9496 		return -ENOMEM;
9497 
9498 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
9499 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
9500 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
9501 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
9502 
9503 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
9504 		   arvif->vdev_id);
9505 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
9506 }
9507 
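/* Configure periodic station keepalive transmission. The trailing ARP
 * response TLV is always present; its address fields are only filled in
 * for the ARP-based keepalive methods.
 */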
9508 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
9509 			     const struct wmi_sta_keepalive_arg *arg)
9510 {
9511 	struct wmi_sta_keepalive_arp_resp_params *arp;
9512 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9513 	struct wmi_sta_keepalive_cmd *cmd;
9514 	struct sk_buff *skb;
9515 	size_t len;
9516 
9517 	len = sizeof(*cmd) + sizeof(*arp);
9518 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9519 	if (!skb)
9520 		return -ENOMEM;
9521 
9522 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
9523 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
9524 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9525 	cmd->enabled = cpu_to_le32(arg->enabled);
9526 	cmd->interval = cpu_to_le32(arg->interval);
9527 	cmd->method = cpu_to_le32(arg->method);
9528 
9529 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
9530 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
9531 						 sizeof(*arp));
9532 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
9533 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
9534 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
9535 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
9536 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
9537 	}
9538 
9539 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
9540 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
9541 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
9542 
9543 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
9544 }
9545 
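/* Send the MLO setup command for this pdev, carrying the MLD group id
 * and the partner link ids as a UINT32 TLV array.
 */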
9546 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
9547 {
9548 	struct wmi_mlo_setup_cmd *cmd;
9549 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9550 	u32 *partner_links, num_links;
9551 	int i, ret, buf_len, arg_len;
9552 	struct sk_buff *skb;
9553 	struct wmi_tlv *tlv;
9554 	void *ptr;
9555 
9556 	num_links = mlo_params->num_partner_links;
9557 	arg_len = num_links * sizeof(u32);
9558 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
9559 
9560 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
9561 	if (!skb)
9562 		return -ENOMEM;
9563 
9564 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
9565 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
9566 						 sizeof(*cmd));
9567 	cmd->mld_group_id = mlo_params->group_id;
9568 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9569 	ptr = skb->data + sizeof(*cmd);
9570 
9571 	tlv = ptr;
9572 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
9573 	ptr += TLV_HDR_SIZE;
9574 
9575 	partner_links = ptr;
9576 	for (i = 0; i < num_links; i++)
9577 		partner_links[i] = mlo_params->partner_link_id[i];
9578 
9579 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
9580 	if (ret) {
9581 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
9582 			    ret);
9583 		dev_kfree_skb(skb);
9584 		return ret;
9585 	}
9586 
9587 	return 0;
9588 }
9589 
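/* Signal to firmware that MLO setup has completed on this pdev. */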
9590 int ath12k_wmi_mlo_ready(struct ath12k *ar)
9591 {
9592 	struct wmi_mlo_ready_cmd *cmd;
9593 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9594 	struct sk_buff *skb;
9595 	int ret, len;
9596 
9597 	len = sizeof(*cmd);
9598 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9599 	if (!skb)
9600 		return -ENOMEM;
9601 
9602 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
9603 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
9604 						 sizeof(*cmd));
9605 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9606 
9607 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
9608 	if (ret) {
9609 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
9610 			    ret);
9611 		dev_kfree_skb(skb);
9612 		return ret;
9613 	}
9614 
9615 	return 0;
9616 }
9617 
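/* Tear down MLO state for this pdev. The reason code is hardwired to
 * WMI_MLO_TEARDOWN_SSR_REASON, i.e. subsystem restart/recovery.
 */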
9618 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
9619 {
9620 	struct wmi_mlo_teardown_cmd *cmd;
9621 	struct ath12k_wmi_pdev *wmi = ar->wmi;
9622 	struct sk_buff *skb;
9623 	int ret, len;
9624 
9625 	len = sizeof(*cmd);
9626 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
9627 	if (!skb)
9628 		return -ENOMEM;
9629 
9630 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
9631 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
9632 						 sizeof(*cmd));
9633 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
9634 	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
9635 
9636 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
9637 	if (ret) {
9638 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
9639 			    ret);
9640 		dev_kfree_skb(skb);
9641 		return ret;
9642 	}
9643 
9644 	return 0;
9645 }
9646