// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

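/*
 * The barrier echo ID below is an arbitrary host-side cookie: the driver
 * sends a WMI echo command carrying it and waits for the matching echo
 * event (see ath10k_wmi_barrier()), which serves as a barrier confirming
 * that earlier WMI commands have been consumed by the firmware.
 */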
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

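/*
 * Each firmware branch supported here (main, 10.x, 10.2.4, 10.4) numbers
 * its WMI commands differently, so a separate command map per branch
 * translates the driver's abstract command identifiers into the
 * interface-specific values. WMI_CMD_UNSUPPORTED marks commands that a
 * given firmware interface does not implement.
 */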
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
			WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
			WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};

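/*
 * Peer parameter map: translates the driver's generic peer parameter
 * identifiers into the values carried in the firmware's peer set-param
 * command (peer_set_param_cmdid above).
 */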
static struct wmi_peer_param_map wmi_peer_param_map = {
	.smps_state = WMI_PEER_SMPS_STATE,
	.ampdu = WMI_PEER_AMPDU,
	.authorize = WMI_PEER_AUTHORIZE,
	.chan_width = WMI_PEER_CHAN_WIDTH,
	.nss = WMI_PEER_NSS,
	.use_4addr = WMI_PEER_USE_4ADDR,
	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
	.debug = WMI_PEER_DEBUG,
	.phymode = WMI_PEER_PHYMODE,
	.dummy_var = WMI_PEER_DUMMY_VAR,
};

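/*
 * VDEV parameter maps: as with the command maps, one map per firmware
 * branch translates generic vdev parameter identifiers into the values
 * expected by the vdev set-param command. WMI_VDEV_PARAM_UNSUPPORTED
 * marks parameters the given interface does not expose.
 */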
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};

1069 static struct wmi_pdev_param_map wmi_pdev_param_map = {
1070 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
1071 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
1072 	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
1073 	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
1074 	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
1075 	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
1076 	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
1077 	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1078 	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
1079 	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
1080 	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1081 	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
1082 	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
1083 	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1084 	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
1085 	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
1086 	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
1087 	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
1088 	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
1089 	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1090 	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1091 	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
1092 	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1093 	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
1094 	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
1095 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1096 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1097 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1098 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1099 	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1100 	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1101 	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1102 	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1103 	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
1104 	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
1105 	.dcs = WMI_PDEV_PARAM_DCS,
1106 	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
1107 	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
1108 	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
1109 	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
1110 	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
1111 	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
1112 	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
1113 	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1114 	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1115 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1116 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
1117 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1118 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
1119 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1120 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1121 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1122 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1123 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1124 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1125 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1126 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1127 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1128 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1129 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1130 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1131 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1132 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1133 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1134 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1135 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1136 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1137 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1138 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1139 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1140 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1141 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1142 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1143 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1144 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1145 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1146 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1147 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1148 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1149 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1150 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1151 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1152 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1153 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1154 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1155 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1156 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1157 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1158 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1159 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1160 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1161 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1162 };
1163 
1164 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1165 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1166 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1167 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1168 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1169 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1170 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1171 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1172 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1173 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1174 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1175 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1176 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1177 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1178 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1179 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1180 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1181 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1182 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1183 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1184 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1185 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1186 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1187 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1188 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1189 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1190 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1191 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1192 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1193 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1194 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1195 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1196 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1197 	.bcnflt_stats_update_period =
1198 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1199 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1200 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1201 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1202 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1203 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1204 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1205 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1206 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1207 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1208 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1209 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1210 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1211 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1212 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1213 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1214 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1215 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1216 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1217 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1218 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1219 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1220 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1221 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1222 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1223 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1224 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1225 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1226 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1227 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1228 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1229 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1230 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1231 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1232 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1233 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1234 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1235 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1236 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1237 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1238 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1239 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1240 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1241 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1242 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1243 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1244 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1245 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1246 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1247 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1248 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1249 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1250 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1251 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1252 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1253 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1254 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1255 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1256 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1257 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1258 };
1259 
1260 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1261 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1262 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1263 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1264 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1265 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1266 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1267 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1268 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1269 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1270 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1271 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1272 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1273 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1274 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1275 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1276 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1277 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1278 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1279 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1280 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1281 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1282 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1283 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1284 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1285 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1286 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1287 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1288 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1289 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1290 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1291 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1292 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1293 	.bcnflt_stats_update_period =
1294 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1295 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1296 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1297 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1298 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1299 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1300 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1301 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1302 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1303 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1304 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1305 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1306 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1307 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1308 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1309 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1310 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1311 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1312 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1313 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1314 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1315 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1316 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1317 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1318 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1319 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1320 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1321 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1322 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1323 	.peer_sta_ps_statechg_enable =
1324 				WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1325 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1326 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1327 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1328 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1329 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1330 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1331 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1332 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1333 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1334 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1335 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1336 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1337 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1338 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1339 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1340 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1341 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1342 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1343 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1344 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1345 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1346 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1347 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1348 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1349 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1350 	.pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1351 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1352 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1353 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1354 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1355 };
1356 
1357 /* firmware 10.2 specific mappings */
1358 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1359 	.init_cmdid = WMI_10_2_INIT_CMDID,
1360 	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1361 	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1362 	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1363 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1364 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1365 	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1366 	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1367 	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1368 	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1369 	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1370 	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1371 	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1372 	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1373 	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1374 	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1375 	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1376 	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1377 	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1378 	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1379 	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1380 	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1381 	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1382 	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1383 	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1384 	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1385 	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1386 	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1387 	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1388 	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1389 	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1390 	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1391 	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1392 	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1393 	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1394 	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1395 	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1396 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1397 	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1398 	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1399 	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1400 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1401 	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1402 	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1403 	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1404 	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1405 	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1406 	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1407 	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1408 	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1409 	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1410 	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1411 	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1412 	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1413 	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1414 	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1415 	.roam_scan_rssi_change_threshold =
1416 				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1417 	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1418 	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1419 	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1420 	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1421 	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1422 	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1423 	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1424 	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1425 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1426 	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1427 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1428 	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1429 	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1430 	.wlan_profile_set_hist_intvl_cmdid =
1431 				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1432 	.wlan_profile_get_profile_data_cmdid =
1433 				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1434 	.wlan_profile_enable_profile_id_cmdid =
1435 				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1436 	.wlan_profile_list_profile_id_cmdid =
1437 				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1438 	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1439 	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1440 	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1441 	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1442 	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1443 	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1444 	.wow_enable_disable_wake_event_cmdid =
1445 				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1446 	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1447 	.wow_hostwakeup_from_sleep_cmdid =
1448 				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1449 	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1450 	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1451 	.vdev_spectral_scan_configure_cmdid =
1452 				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1453 	.vdev_spectral_scan_enable_cmdid =
1454 				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1455 	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1456 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1457 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1458 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1459 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1460 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1461 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1462 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1463 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1464 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1465 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1466 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1467 	.echo_cmdid = WMI_10_2_ECHO_CMDID,
1468 	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1469 	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1470 	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1471 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1472 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1473 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1474 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1475 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1476 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1477 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1478 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1479 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1480 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1481 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1482 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1483 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1484 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1485 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1486 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1487 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1488 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1489 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1490 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
1491 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1492 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1493 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1494 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1495 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1496 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1497 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1498 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1499 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1500 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1501 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1502 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1503 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1504 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1505 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1506 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1507 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1508 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1509 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1510 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1511 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1512 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1513 };
1514 
1515 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1516 	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1517 	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1518 	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1519 	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1520 	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1521 	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1522 	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1523 	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1524 	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1525 	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1526 	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1527 	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1528 	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1529 	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1530 	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1531 	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1532 	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1533 	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1534 	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1535 	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1536 	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1537 	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1538 	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1539 	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1540 	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1541 	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1542 	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1543 	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1544 	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1545 	.pdev_stats_update_period =
1546 			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1547 	.vdev_stats_update_period =
1548 			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1549 	.peer_stats_update_period =
1550 			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1551 	.bcnflt_stats_update_period =
1552 			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1553 	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1554 	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1555 	.dcs = WMI_10_4_PDEV_PARAM_DCS,
1556 	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1557 	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1558 	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1559 	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1560 	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1561 	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1562 	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1563 	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1564 	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1565 	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1566 	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1567 	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1568 	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1569 	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1570 	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1571 	.smart_antenna_default_antenna =
1572 			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1573 	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1574 	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1575 	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1576 	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1577 	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1578 	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1579 	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1580 	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1581 	.remove_mcast2ucast_buffer =
1582 			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1583 	.peer_sta_ps_statechg_enable =
1584 			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1585 	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1586 	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1587 	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1588 	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1589 	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1590 	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1591 	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1592 	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1593 	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1594 	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1595 	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1596 	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1597 	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1598 	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1599 	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1600 	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1601 	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1602 	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1603 	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1604 	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1605 	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1606 	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1607 	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1608 	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1609 	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1610 	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1611 	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1612 	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1613 	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1614 	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1615 };
1616 
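/* Cipher suite lookup tables: the first maps the shared WMI_CIPHER_* values
 * onto themselves for non-TLV firmware, while the TLV table below remaps
 * them to the WMI_TLV_CIPHER_* encoding expected by TLV firmware.
 */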
1617 static const u8 wmi_key_cipher_suites[] = {
1618 	[WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1619 	[WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1620 	[WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1621 	[WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1622 	[WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1623 	[WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1624 	[WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1625 	[WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1626 	[WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1627 };
1628 
1629 static const u8 wmi_tlv_key_cipher_suites[] = {
1630 	[WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1631 	[WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1632 	[WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1633 	[WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1634 	[WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1635 	[WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1636 	[WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1637 	[WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1638 	[WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1639 };
1640 
1641 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1642 	.auth = WMI_PEER_AUTH,
1643 	.qos = WMI_PEER_QOS,
1644 	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1645 	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1646 	.apsd = WMI_PEER_APSD,
1647 	.ht = WMI_PEER_HT,
1648 	.bw40 = WMI_PEER_40MHZ,
1649 	.stbc = WMI_PEER_STBC,
1650 	.ldbc = WMI_PEER_LDPC,
1651 	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
1652 	.static_mimops = WMI_PEER_STATIC_MIMOPS,
1653 	.spatial_mux = WMI_PEER_SPATIAL_MUX,
1654 	.vht = WMI_PEER_VHT,
1655 	.bw80 = WMI_PEER_80MHZ,
1656 	.vht_2g = WMI_PEER_VHT_2G,
1657 	.pmf = WMI_PEER_PMF,
1658 	.bw160 = WMI_PEER_160MHZ,
1659 };
1660 
1661 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1662 	.auth = WMI_10X_PEER_AUTH,
1663 	.qos = WMI_10X_PEER_QOS,
1664 	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1665 	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1666 	.apsd = WMI_10X_PEER_APSD,
1667 	.ht = WMI_10X_PEER_HT,
1668 	.bw40 = WMI_10X_PEER_40MHZ,
1669 	.stbc = WMI_10X_PEER_STBC,
1670 	.ldbc = WMI_10X_PEER_LDPC,
1671 	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1672 	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1673 	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1674 	.vht = WMI_10X_PEER_VHT,
1675 	.bw80 = WMI_10X_PEER_80MHZ,
1676 	.bw160 = WMI_10X_PEER_160MHZ,
1677 };
1678 
1679 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1680 	.auth = WMI_10_2_PEER_AUTH,
1681 	.qos = WMI_10_2_PEER_QOS,
1682 	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1683 	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1684 	.apsd = WMI_10_2_PEER_APSD,
1685 	.ht = WMI_10_2_PEER_HT,
1686 	.bw40 = WMI_10_2_PEER_40MHZ,
1687 	.stbc = WMI_10_2_PEER_STBC,
1688 	.ldbc = WMI_10_2_PEER_LDPC,
1689 	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1690 	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1691 	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1692 	.vht = WMI_10_2_PEER_VHT,
1693 	.bw80 = WMI_10_2_PEER_80MHZ,
1694 	.vht_2g = WMI_10_2_PEER_VHT_2G,
1695 	.pmf = WMI_10_2_PEER_PMF,
1696 	.bw160 = WMI_10_2_PEER_160MHZ,
1697 };
1698 
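/* Translate a host wmi_channel_arg into the firmware's wmi_channel layout:
 * build the channel flags, convert frequencies and power limits to
 * little-endian, and derive the secondary center frequency (and its DFS
 * flag) for VHT80+80 and VHT160 operation.
 */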
1699 void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
1700 				const struct wmi_channel_arg *arg)
1701 {
1702 	u32 flags = 0;
1703 	struct ieee80211_channel *chan = NULL;
1704 
1705 	memset(ch, 0, sizeof(*ch));
1706 
1707 	if (arg->passive)
1708 		flags |= WMI_CHAN_FLAG_PASSIVE;
1709 	if (arg->allow_ibss)
1710 		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1711 	if (arg->allow_ht)
1712 		flags |= WMI_CHAN_FLAG_ALLOW_HT;
1713 	if (arg->allow_vht)
1714 		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1715 	if (arg->ht40plus)
1716 		flags |= WMI_CHAN_FLAG_HT40_PLUS;
1717 	if (arg->chan_radar)
1718 		flags |= WMI_CHAN_FLAG_DFS;
1719 
1720 	ch->band_center_freq2 = 0;
1721 	ch->mhz = __cpu_to_le32(arg->freq);
1722 	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1723 	if (arg->mode == MODE_11AC_VHT80_80) {
1724 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1725 		chan = ieee80211_get_channel(ar->hw->wiphy,
1726 					     arg->band_center_freq2 - 10);
1727 	}
1728 
1729 	if (arg->mode == MODE_11AC_VHT160) {
1730 		u32 band_center_freq1;
1731 		u32 band_center_freq2;
1732 
1733 		if (arg->freq > arg->band_center_freq1) {
1734 			band_center_freq1 = arg->band_center_freq1 + 40;
1735 			band_center_freq2 = arg->band_center_freq1 - 40;
1736 		} else {
1737 			band_center_freq1 = arg->band_center_freq1 - 40;
1738 			band_center_freq2 = arg->band_center_freq1 + 40;
1739 		}
1740 
1741 		ch->band_center_freq1 =
1742 					__cpu_to_le32(band_center_freq1);
1743 		/* Subtract 10 MHz to get a defined 5 GHz channel frequency */
1744 		chan = ieee80211_get_channel(ar->hw->wiphy,
1745 					     band_center_freq2 - 10);
1746 		/* The center frequency of the entire VHT160 */
1747 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
1748 	}
1749 
1750 	if (chan && chan->flags & IEEE80211_CHAN_RADAR)
1751 		flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
1752 
1753 	ch->min_power = arg->min_power;
1754 	ch->max_power = arg->max_power;
1755 	ch->reg_power = arg->max_reg_power;
1756 	ch->antenna_max = arg->max_antenna_gain;
1757 	ch->max_tx_power = arg->max_power;
1758 
1759 	/* mode & flags share storage */
1760 	ch->mode = arg->mode;
1761 	ch->flags |= __cpu_to_le32(flags);
1762 }
1763 
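/* Wait for the firmware's service ready event, polling the copy engine
 * completion rings once as a fallback if the interrupt never arrives.
 */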
1764 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1765 {
1766 	unsigned long time_left, i;
1767 
1768 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1769 						WMI_SERVICE_READY_TIMEOUT_HZ);
1770 	if (!time_left) {
1771 		/* Sometimes the PCI HIF doesn't receive an interrupt
1772 		 * for the service ready message even if the buffer
1773 		 * was completed. A PCIe sniffer shows that it's
1774 		 * because the corresponding CE ring doesn't fire
1775 		 * it. Work around this by polling the CE rings once.
1776 		 */
1777 		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1778 
1779 		for (i = 0; i < CE_COUNT; i++)
1780 			ath10k_hif_send_complete_check(ar, i, 1);
1781 
1782 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1783 							WMI_SERVICE_READY_TIMEOUT_HZ);
1784 		if (!time_left) {
1785 			ath10k_warn(ar, "polling timed out\n");
1786 			return -ETIMEDOUT;
1787 		}
1788 
1789 		ath10k_warn(ar, "service ready completion received, continuing normally\n");
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1796 {
1797 	unsigned long time_left;
1798 
1799 	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1800 						WMI_UNIFIED_READY_TIMEOUT_HZ);
1801 	if (!time_left)
1802 		return -ETIMEDOUT;
1803 	return 0;
1804 }
1805 
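/* Allocate an HTC skb with headroom for the WMI headers; the requested
 * length is rounded up to a 4-byte multiple and the payload zeroed.
 */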
1806 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1807 {
1808 	struct sk_buff *skb;
1809 	u32 round_len = roundup(len, 4);
1810 
1811 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1812 	if (!skb)
1813 		return NULL;
1814 
1815 	skb_reserve(skb, WMI_SKB_HEADROOM);
1816 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1817 		ath10k_warn(ar, "Unaligned WMI skb\n");
1818 
1819 	skb_put(skb, round_len);
1820 	memset(skb->data, 0, round_len);
1821 
1822 	return skb;
1823 }
1824 
1825 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1826 {
1827 	dev_kfree_skb(skb);
1828 }
1829 
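/* Prepend the WMI command header and hand the buffer to HTC without
 * blocking for TX credits; on failure the header is pulled back off so
 * the caller can retry with the same skb.
 */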
1830 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1831 			       u32 cmd_id)
1832 {
1833 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1834 	struct wmi_cmd_hdr *cmd_hdr;
1835 	int ret;
1836 	u32 cmd = 0;
1837 
1838 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1839 		return -ENOMEM;
1840 
1841 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1842 
1843 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1844 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1845 
1846 	memset(skb_cb, 0, sizeof(*skb_cb));
1847 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1848 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1849 
1850 	if (ret)
1851 		goto err_pull;
1852 
1853 	return 0;
1854 
1855 err_pull:
1856 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1857 	return ret;
1858 }
1859 
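/* Send a scheduled beacon for one vif by DMA reference, tracking
 * beacon_state under data_lock (SCHEDULED -> SENDING -> SENT, or back to
 * SCHEDULED if the send fails).
 */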
1860 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1861 {
1862 	struct ath10k *ar = arvif->ar;
1863 	struct ath10k_skb_cb *cb;
1864 	struct sk_buff *bcn;
1865 	bool dtim_zero;
1866 	bool deliver_cab;
1867 	int ret;
1868 
1869 	spin_lock_bh(&ar->data_lock);
1870 
1871 	bcn = arvif->beacon;
1872 
1873 	if (!bcn)
1874 		goto unlock;
1875 
1876 	cb = ATH10K_SKB_CB(bcn);
1877 
1878 	switch (arvif->beacon_state) {
1879 	case ATH10K_BEACON_SENDING:
1880 	case ATH10K_BEACON_SENT:
1881 		break;
1882 	case ATH10K_BEACON_SCHEDULED:
1883 		arvif->beacon_state = ATH10K_BEACON_SENDING;
1884 		spin_unlock_bh(&ar->data_lock);
1885 
1886 		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1887 		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1888 		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1889 							arvif->vdev_id,
1890 							bcn->data, bcn->len,
1891 							cb->paddr,
1892 							dtim_zero,
1893 							deliver_cab);
1894 
1895 		spin_lock_bh(&ar->data_lock);
1896 
1897 		if (ret == 0)
1898 			arvif->beacon_state = ATH10K_BEACON_SENT;
1899 		else
1900 			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1901 	}
1902 
1903 unlock:
1904 	spin_unlock_bh(&ar->data_lock);
1905 }
1906 
1907 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1908 				       struct ieee80211_vif *vif)
1909 {
1910 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
1911 
1912 	ath10k_wmi_tx_beacon_nowait(arvif);
1913 }
1914 
1915 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1916 {
1917 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1918 						   ATH10K_ITER_NORMAL_FLAGS,
1919 						   ath10k_wmi_tx_beacons_iter,
1920 						   NULL);
1921 }
1922 
1923 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1924 {
1925 	/* try to send pending beacons first. they take priority */
1926 	ath10k_wmi_tx_beacons_nowait(ar);
1927 
1928 	wake_up(&ar->wmi.tx_credits_wq);
1929 }
1930 
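/* Blocking send: retry ath10k_wmi_cmd_send_nowait() for up to three
 * seconds while waiting for TX credits, free the skb on failure, and
 * trigger recovery if the command still times out with -EAGAIN.
 */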
1931 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1932 {
1933 	int ret = -EOPNOTSUPP;
1934 
1935 	might_sleep();
1936 
1937 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
1938 		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1939 			    cmd_id);
1940 		return ret;
1941 	}
1942 
1943 	wait_event_timeout(ar->wmi.tx_credits_wq, ({
1944 		/* try to send pending beacons first. they take priority */
1945 		ath10k_wmi_tx_beacons_nowait(ar);
1946 
1947 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1948 
1949 		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1950 			ret = -ESHUTDOWN;
1951 
1952 		(ret != -EAGAIN);
1953 	}), 3 * HZ);
1954 
1955 	if (ret)
1956 		dev_kfree_skb_any(skb);
1957 
1958 	if (ret == -EAGAIN) {
1959 		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1960 			    cmd_id);
1961 		ath10k_core_start_recovery(ar);
1962 	}
1963 
1964 	return ret;
1965 }
1966 
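/* Build a WMI management TX command from an outgoing frame, reserving
 * extra buffer space for the MMIE or CCMP/GCMP MIC when the frame is a
 * protected action, deauth or disassoc frame.
 */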
1967 static struct sk_buff *
1968 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1969 {
1970 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1971 	struct ath10k_vif *arvif;
1972 	struct wmi_mgmt_tx_cmd *cmd;
1973 	struct ieee80211_hdr *hdr;
1974 	struct sk_buff *skb;
1975 	int len;
1976 	u32 vdev_id;
1977 	u32 buf_len = msdu->len;
1978 	u16 fc;
1979 	const u8 *peer_addr;
1980 
1981 	hdr = (struct ieee80211_hdr *)msdu->data;
1982 	fc = le16_to_cpu(hdr->frame_control);
1983 
1984 	if (cb->vif) {
1985 		arvif = (void *)cb->vif->drv_priv;
1986 		vdev_id = arvif->vdev_id;
1987 	} else {
1988 		vdev_id = 0;
1989 	}
1990 
1991 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1992 		return ERR_PTR(-EINVAL);
1993 
1994 	len = sizeof(cmd->hdr) + msdu->len;
1995 
1996 	if ((ieee80211_is_action(hdr->frame_control) ||
1997 	     ieee80211_is_deauth(hdr->frame_control) ||
1998 	     ieee80211_is_disassoc(hdr->frame_control)) &&
1999 	     ieee80211_has_protected(hdr->frame_control)) {
2000 		peer_addr = hdr->addr1;
2001 		if (is_multicast_ether_addr(peer_addr)) {
2002 			len += sizeof(struct ieee80211_mmie_16);
2003 			buf_len += sizeof(struct ieee80211_mmie_16);
2004 		} else {
2005 			if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
2006 			    cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
2007 				len += IEEE80211_GCMP_MIC_LEN;
2008 				buf_len += IEEE80211_GCMP_MIC_LEN;
2009 			} else {
2010 				len += IEEE80211_CCMP_MIC_LEN;
2011 				buf_len += IEEE80211_CCMP_MIC_LEN;
2012 			}
2013 		}
2014 	}
2015 
2016 	len = round_up(len, 4);
2017 
2018 	skb = ath10k_wmi_alloc_skb(ar, len);
2019 	if (!skb)
2020 		return ERR_PTR(-ENOMEM);
2021 
2022 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
2023 
2024 	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
2025 	cmd->hdr.tx_rate = 0;
2026 	cmd->hdr.tx_power = 0;
2027 	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
2028 
2029 	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
2030 	memcpy(cmd->buf, msdu->data, msdu->len);
2031 
2032 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
2033 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
2034 		   fc & IEEE80211_FCTL_STYPE);
2035 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
2036 	trace_ath10k_tx_payload(ar, skb->data, skb->len);
2037 
2038 	return skb;
2039 }
2040 
2041 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
2042 {
2043 	lockdep_assert_held(&ar->data_lock);
2044 
2045 	switch (ar->scan.state) {
2046 	case ATH10K_SCAN_IDLE:
2047 	case ATH10K_SCAN_RUNNING:
2048 	case ATH10K_SCAN_ABORTING:
2049 		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
2050 			    ath10k_scan_state_str(ar->scan.state),
2051 			    ar->scan.state);
2052 		break;
2053 	case ATH10K_SCAN_STARTING:
2054 		ar->scan.state = ATH10K_SCAN_RUNNING;
2055 
2056 		if (ar->scan.is_roc)
2057 			ieee80211_ready_on_channel(ar->hw);
2058 
2059 		complete(&ar->scan.started);
2060 		break;
2061 	}
2062 }
2063 
2064 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2065 {
2066 	lockdep_assert_held(&ar->data_lock);
2067 
2068 	switch (ar->scan.state) {
2069 	case ATH10K_SCAN_IDLE:
2070 	case ATH10K_SCAN_RUNNING:
2071 	case ATH10K_SCAN_ABORTING:
2072 		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2073 			    ath10k_scan_state_str(ar->scan.state),
2074 			    ar->scan.state);
2075 		break;
2076 	case ATH10K_SCAN_STARTING:
2077 		complete(&ar->scan.started);
2078 		__ath10k_scan_finish(ar);
2079 		break;
2080 	}
2081 }
2082 
2083 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2084 {
2085 	lockdep_assert_held(&ar->data_lock);
2086 
2087 	switch (ar->scan.state) {
2088 	case ATH10K_SCAN_IDLE:
2089 	case ATH10K_SCAN_STARTING:
2090 		/* One suspected reason scan can be completed while starting is
2091 		 * if firmware fails to deliver all scan events to the host,
2092 		 * e.g. when transport pipe is full. This has been observed
2093 		 * with spectral scan phyerr events starving wmi transport
2094 		 * pipe. In such case the "scan completed" event should be (and
2095 		 * is) ignored by the host as it may be just firmware's scan
2096 		 * state machine recovering.
2097 		 */
2098 		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2099 			    ath10k_scan_state_str(ar->scan.state),
2100 			    ar->scan.state);
2101 		break;
2102 	case ATH10K_SCAN_RUNNING:
2103 	case ATH10K_SCAN_ABORTING:
2104 		__ath10k_scan_finish(ar);
2105 		break;
2106 	}
2107 }
2108 
2109 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2110 {
2111 	lockdep_assert_held(&ar->data_lock);
2112 
2113 	switch (ar->scan.state) {
2114 	case ATH10K_SCAN_IDLE:
2115 	case ATH10K_SCAN_STARTING:
2116 		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2117 			    ath10k_scan_state_str(ar->scan.state),
2118 			    ar->scan.state);
2119 		break;
2120 	case ATH10K_SCAN_RUNNING:
2121 	case ATH10K_SCAN_ABORTING:
2122 		ar->scan_channel = NULL;
2123 		break;
2124 	}
2125 }
2126 
2127 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2128 {
2129 	lockdep_assert_held(&ar->data_lock);
2130 
2131 	switch (ar->scan.state) {
2132 	case ATH10K_SCAN_IDLE:
2133 	case ATH10K_SCAN_STARTING:
2134 		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2135 			    ath10k_scan_state_str(ar->scan.state),
2136 			    ar->scan.state);
2137 		break;
2138 	case ATH10K_SCAN_RUNNING:
2139 	case ATH10K_SCAN_ABORTING:
2140 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2141 
2142 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2143 			complete(&ar->scan.on_channel);
2144 		break;
2145 	}
2146 }
2147 
2148 static const char *
2149 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2150 			       enum wmi_scan_completion_reason reason)
2151 {
2152 	switch (type) {
2153 	case WMI_SCAN_EVENT_STARTED:
2154 		return "started";
2155 	case WMI_SCAN_EVENT_COMPLETED:
2156 		switch (reason) {
2157 		case WMI_SCAN_REASON_COMPLETED:
2158 			return "completed";
2159 		case WMI_SCAN_REASON_CANCELLED:
2160 			return "completed [cancelled]";
2161 		case WMI_SCAN_REASON_PREEMPTED:
2162 			return "completed [preempted]";
2163 		case WMI_SCAN_REASON_TIMEDOUT:
2164 			return "completed [timedout]";
2165 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
2166 			return "completed [internal err]";
2167 		case WMI_SCAN_REASON_MAX:
2168 			break;
2169 		}
2170 		return "completed [unknown]";
2171 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2172 		return "bss channel";
2173 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2174 		return "foreign channel";
2175 	case WMI_SCAN_EVENT_DEQUEUED:
2176 		return "dequeued";
2177 	case WMI_SCAN_EVENT_PREEMPTED:
2178 		return "preempted";
2179 	case WMI_SCAN_EVENT_START_FAILED:
2180 		return "start failed";
2181 	case WMI_SCAN_EVENT_RESTARTED:
2182 		return "restarted";
2183 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2184 		return "foreign channel exit";
2185 	default:
2186 		return "unknown";
2187 	}
2188 }
2189 
2190 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2191 				      struct wmi_scan_ev_arg *arg)
2192 {
2193 	struct wmi_scan_event *ev = (void *)skb->data;
2194 
2195 	if (skb->len < sizeof(*ev))
2196 		return -EPROTO;
2197 
2198 	skb_pull(skb, sizeof(*ev));
2199 	arg->event_type = ev->event_type;
2200 	arg->reason = ev->reason;
2201 	arg->channel_freq = ev->channel_freq;
2202 	arg->scan_req_id = ev->scan_req_id;
2203 	arg->scan_id = ev->scan_id;
2204 	arg->vdev_id = ev->vdev_id;
2205 
2206 	return 0;
2207 }
2208 
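/* Parse a WMI scan event and dispatch it to the per-state handlers above
 * while holding data_lock.
 */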
2209 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2210 {
2211 	struct wmi_scan_ev_arg arg = {};
2212 	enum wmi_scan_event_type event_type;
2213 	enum wmi_scan_completion_reason reason;
2214 	u32 freq;
2215 	u32 req_id;
2216 	u32 scan_id;
2217 	u32 vdev_id;
2218 	int ret;
2219 
2220 	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2221 	if (ret) {
2222 		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2223 		return ret;
2224 	}
2225 
2226 	event_type = __le32_to_cpu(arg.event_type);
2227 	reason = __le32_to_cpu(arg.reason);
2228 	freq = __le32_to_cpu(arg.channel_freq);
2229 	req_id = __le32_to_cpu(arg.scan_req_id);
2230 	scan_id = __le32_to_cpu(arg.scan_id);
2231 	vdev_id = __le32_to_cpu(arg.vdev_id);
2232 
2233 	spin_lock_bh(&ar->data_lock);
2234 
2235 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2236 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2237 		   ath10k_wmi_event_scan_type_str(event_type, reason),
2238 		   event_type, reason, freq, req_id, scan_id, vdev_id,
2239 		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2240 
2241 	switch (event_type) {
2242 	case WMI_SCAN_EVENT_STARTED:
2243 		ath10k_wmi_event_scan_started(ar);
2244 		break;
2245 	case WMI_SCAN_EVENT_COMPLETED:
2246 		ath10k_wmi_event_scan_completed(ar);
2247 		break;
2248 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2249 		ath10k_wmi_event_scan_bss_chan(ar);
2250 		break;
2251 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2252 		ath10k_wmi_event_scan_foreign_chan(ar, freq);
2253 		break;
2254 	case WMI_SCAN_EVENT_START_FAILED:
2255 		ath10k_warn(ar, "received scan start failure event\n");
2256 		ath10k_wmi_event_scan_start_failed(ar);
2257 		break;
2258 	case WMI_SCAN_EVENT_DEQUEUED:
2259 	case WMI_SCAN_EVENT_PREEMPTED:
2260 	case WMI_SCAN_EVENT_RESTARTED:
2261 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2262 	default:
2263 		break;
2264 	}
2265 
2266 	spin_unlock_bh(&ar->data_lock);
2267 	return 0;
2268 }
2269 
2270 /* If keys are configured, HW decrypts all frames
2271  * with protected bit set. Mark such frames as decrypted.
2272  */
2273 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2274 					 struct sk_buff *skb,
2275 					 struct ieee80211_rx_status *status)
2276 {
2277 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2278 	unsigned int hdrlen;
2279 	bool peer_key;
2280 	u8 *addr, keyidx;
2281 
2282 	if (!ieee80211_is_auth(hdr->frame_control) ||
2283 	    !ieee80211_has_protected(hdr->frame_control))
2284 		return;
2285 
2286 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2287 	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2288 		return;
2289 
2290 	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2291 	addr = ieee80211_get_SA(hdr);
2292 
2293 	spin_lock_bh(&ar->data_lock);
2294 	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2295 	spin_unlock_bh(&ar->data_lock);
2296 
2297 	if (peer_key) {
2298 		ath10k_dbg(ar, ATH10K_DBG_MAC,
2299 			   "mac wep key present for peer %pM\n", addr);
2300 		status->flag |= RX_FLAG_DECRYPTED;
2301 	}
2302 }
2303 
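/* Parse a management RX event into wmi_mgmt_rx_ev_arg, handling both the
 * v1 and extended (EXT_WMI_MGMT_RX) event layouts, and trim the skb to
 * the reported MSDU length.
 */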
2304 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2305 					 struct wmi_mgmt_rx_ev_arg *arg)
2306 {
2307 	struct wmi_mgmt_rx_event_v1 *ev_v1;
2308 	struct wmi_mgmt_rx_event_v2 *ev_v2;
2309 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2310 	struct wmi_mgmt_rx_ext_info *ext_info;
2311 	size_t pull_len;
2312 	u32 msdu_len;
2313 	u32 len;
2314 
2315 	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2316 		     ar->running_fw->fw_file.fw_features)) {
2317 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2318 		ev_hdr = &ev_v2->hdr.v1;
2319 		pull_len = sizeof(*ev_v2);
2320 	} else {
2321 		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2322 		ev_hdr = &ev_v1->hdr;
2323 		pull_len = sizeof(*ev_v1);
2324 	}
2325 
2326 	if (skb->len < pull_len)
2327 		return -EPROTO;
2328 
2329 	skb_pull(skb, pull_len);
2330 	arg->channel = ev_hdr->channel;
2331 	arg->buf_len = ev_hdr->buf_len;
2332 	arg->status = ev_hdr->status;
2333 	arg->snr = ev_hdr->snr;
2334 	arg->phy_mode = ev_hdr->phy_mode;
2335 	arg->rate = ev_hdr->rate;
2336 
2337 	msdu_len = __le32_to_cpu(arg->buf_len);
2338 	if (skb->len < msdu_len)
2339 		return -EPROTO;
2340 
2341 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2342 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2343 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2344 		memcpy(&arg->ext_info, ext_info,
2345 		       sizeof(struct wmi_mgmt_rx_ext_info));
2346 	}
2347 	/* The WMI buffer might have ended up padded to 4 bytes due to an HTC
2348 	 * trailer with a credit update. Trim the excess garbage.
2349 	 */
2350 	skb_trim(skb, msdu_len);
2351 
2352 	return 0;
2353 }
2354 
2355 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2356 					      struct sk_buff *skb,
2357 					      struct wmi_mgmt_rx_ev_arg *arg)
2358 {
2359 	struct wmi_10_4_mgmt_rx_event *ev;
2360 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2361 	size_t pull_len;
2362 	u32 msdu_len;
2363 	struct wmi_mgmt_rx_ext_info *ext_info;
2364 	u32 len;
2365 
2366 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2367 	ev_hdr = &ev->hdr;
2368 	pull_len = sizeof(*ev);
2369 
2370 	if (skb->len < pull_len)
2371 		return -EPROTO;
2372 
2373 	skb_pull(skb, pull_len);
2374 	arg->channel = ev_hdr->channel;
2375 	arg->buf_len = ev_hdr->buf_len;
2376 	arg->status = ev_hdr->status;
2377 	arg->snr = ev_hdr->snr;
2378 	arg->phy_mode = ev_hdr->phy_mode;
2379 	arg->rate = ev_hdr->rate;
2380 
2381 	msdu_len = __le32_to_cpu(arg->buf_len);
2382 	if (skb->len < msdu_len)
2383 		return -EPROTO;
2384 
2385 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2386 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2387 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2388 		memcpy(&arg->ext_info, ext_info,
2389 		       sizeof(struct wmi_mgmt_rx_ext_info));
2390 	}
2391 
2392 	/* Make sure bytes added for padding are removed. */
2393 	skb_trim(skb, msdu_len);
2394 
2395 	return 0;
2396 }
2397 
2398 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2399 				       struct ieee80211_hdr *hdr)
2400 {
2401 	if (!ieee80211_has_protected(hdr->frame_control))
2402 		return false;
2403 
2404 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
2405 	 * encrypted payload. However in case of PMF it delivers decrypted
2406 	 * frames with Protected Bit set.
2407 	 */
2408 	if (ieee80211_is_auth(hdr->frame_control))
2409 		return false;
2410 
2411 	/* qca99x0 based FW delivers broadcast or multicast management frames
2412 	 * (ex: group privacy action frames in mesh) as encrypted payload.
2413 	 */
2414 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2415 	    ar->hw_params.sw_decrypt_mcast_mgmt)
2416 		return false;
2417 
2418 	return true;
2419 }
2420 
2421 static int
2422 wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2423 {
2424 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2425 	struct ath10k_wmi *wmi = &ar->wmi;
2426 	struct ieee80211_tx_info *info;
2427 	struct sk_buff *msdu;
2428 	int ret;
2429 
2430 	spin_lock_bh(&ar->data_lock);
2431 
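	/* Look up the skb that was queued under this descriptor id when the
	 * management frame was handed over for transmission.
	 */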
2432 	pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2433 	if (!pkt_addr) {
2434 		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2435 			    param->desc_id);
2436 		ret = -ENOENT;
2437 		goto out;
2438 	}
2439 
2440 	msdu = pkt_addr->vaddr;
2441 	dma_unmap_single(ar->dev, pkt_addr->paddr,
2442 			 msdu->len, DMA_TO_DEVICE);
2443 	info = IEEE80211_SKB_CB(msdu);
2444 
2445 	if (param->status) {
2446 		info->flags &= ~IEEE80211_TX_STAT_ACK;
2447 	} else {
2448 		info->flags |= IEEE80211_TX_STAT_ACK;
2449 		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2450 					  param->ack_rssi;
2451 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
2452 	}
2453 
2454 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
2455 
2456 	ret = 0;
2457 
2458 out:
2459 	idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2460 	spin_unlock_bh(&ar->data_lock);
2461 	return ret;
2462 }
2463 
2464 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2465 {
2466 	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2467 	struct mgmt_tx_compl_params param;
2468 	int ret;
2469 
2470 	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2471 	if (ret) {
2472 		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2473 		return ret;
2474 	}
2475 
2476 	memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2477 	param.desc_id = __le32_to_cpu(arg.desc_id);
2478 	param.status = __le32_to_cpu(arg.status);
2479 
2480 	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2481 		param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2482 
2483 	wmi_process_mgmt_tx_comp(ar, &param);
2484 
2485 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2486 
2487 	return 0;
2488 }
2489 
2490 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2491 {
2492 	struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2493 	struct mgmt_tx_compl_params param;
2494 	u32 num_reports;
2495 	int i, ret;
2496 
2497 	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2498 	if (ret) {
2499 		ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2500 		return ret;
2501 	}
2502 
2503 	num_reports = __le32_to_cpu(arg.num_reports);
2504 
2505 	for (i = 0; i < num_reports; i++) {
2506 		memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2507 		param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2508 		param.status = __le32_to_cpu(arg.status[i]);
2509 
2510 		if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2511 			param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2512 		wmi_process_mgmt_tx_comp(ar, &param);
2513 	}
2514 
2515 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2516 
2517 	return 0;
2518 }
2519 
2520 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2521 {
2522 	struct wmi_mgmt_rx_ev_arg arg = {};
2523 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2524 	struct ieee80211_hdr *hdr;
2525 	struct ieee80211_supported_band *sband;
2526 	u32 rx_status;
2527 	u32 channel;
2528 	u32 phy_mode;
2529 	u32 snr, rssi;
2530 	u32 rate;
2531 	u16 fc;
2532 	int ret, i;
2533 
2534 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2535 	if (ret) {
2536 		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2537 		dev_kfree_skb(skb);
2538 		return ret;
2539 	}
2540 
2541 	channel = __le32_to_cpu(arg.channel);
2542 	rx_status = __le32_to_cpu(arg.status);
2543 	snr = __le32_to_cpu(arg.snr);
2544 	phy_mode = __le32_to_cpu(arg.phy_mode);
2545 	rate = __le32_to_cpu(arg.rate);
2546 
2547 	memset(status, 0, sizeof(*status));
2548 
2549 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2550 		   "event mgmt rx status %08x\n", rx_status);
2551 
2552 	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2553 	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2554 	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2555 		dev_kfree_skb(skb);
2556 		return 0;
2557 	}
2558 
2559 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
2560 		status->flag |= RX_FLAG_MMIC_ERROR;
2561 
2562 	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2563 		status->mactime =
2564 			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2565 		status->flag |= RX_FLAG_MACTIME_END;
2566 	}
2567 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2568 	 * MODE_11B. This means phy_mode is not a reliable source for the band
2569 	 * of mgmt rx.
2570 	 */
2571 	if (channel >= 1 && channel <= 14) {
2572 		status->band = NL80211_BAND_2GHZ;
2573 	} else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2574 		status->band = NL80211_BAND_5GHZ;
2575 	} else {
2576 		/* Shouldn't happen unless list of advertised channels to
2577 		 * mac80211 has been changed.
2578 		 */
2579 		WARN_ON_ONCE(1);
2580 		dev_kfree_skb(skb);
2581 		return 0;
2582 	}
2583 
2584 	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2585 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2586 
2587 	sband = &ar->mac.sbands[status->band];
2588 
2589 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
2590 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2591 
2592 	BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
2593 
2594 	for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
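	/* Per-chain RSSI is reported relative to the noise floor; flag only
	 * chains that carry a valid, non-zero sample in status->chains.
	 */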
2595 		status->chains &= ~BIT(i);
2596 		rssi = __le32_to_cpu(arg.rssi[i]);
2597 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);
2598 
2599 		if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
2600 			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
2601 			status->chains |= BIT(i);
2602 		}
2603 	}
2604 
2605 	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2606 
2607 	hdr = (struct ieee80211_hdr *)skb->data;
2608 	fc = le16_to_cpu(hdr->frame_control);
2609 
2610 	/* Firmware is guaranteed to report all essential management frames via
2611 	 * WMI while it can deliver some extra via HTT. Since there can be
2612 	 * duplicates split the reporting wrt monitor/sniffing.
2613 	 */
2614 	status->flag |= RX_FLAG_SKIP_MONITOR;
2615 
2616 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
2617 
2618 	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2619 		status->flag |= RX_FLAG_DECRYPTED;
2620 
2621 		if (!ieee80211_is_action(hdr->frame_control) &&
2622 		    !ieee80211_is_deauth(hdr->frame_control) &&
2623 		    !ieee80211_is_disassoc(hdr->frame_control)) {
2624 			status->flag |= RX_FLAG_IV_STRIPPED |
2625 					RX_FLAG_MMIC_STRIPPED;
2626 			hdr->frame_control = __cpu_to_le16(fc &
2627 					~IEEE80211_FCTL_PROTECTED);
2628 		}
2629 	}
2630 
2631 	if (ieee80211_is_beacon(hdr->frame_control))
2632 		ath10k_mac_handle_beacon(ar, skb);
2633 
2634 	if (ieee80211_is_beacon(hdr->frame_control) ||
2635 	    ieee80211_is_probe_resp(hdr->frame_control))
2636 		status->boottime_ns = ktime_get_boottime_ns();
2637 
2638 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2639 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2640 		   skb, skb->len,
2641 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2642 
2643 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2644 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2645 		   status->freq, status->band, status->signal,
2646 		   status->rate_idx);
2647 
2648 	ieee80211_rx_ni(ar->hw, skb);
2649 
2650 	return 0;
2651 }
2652 
2653 static int freq_to_idx(struct ath10k *ar, int freq)
2654 {
2655 	struct ieee80211_supported_band *sband;
2656 	int band, ch, idx = 0;
2657 
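	/* The survey array is indexed by the channel's position in the
	 * flattened list of channels registered with mac80211, walked
	 * band by band.
	 */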
2658 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2659 		sband = ar->hw->wiphy->bands[band];
2660 		if (!sband)
2661 			continue;
2662 
2663 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
2664 			if (sband->channels[ch].center_freq == freq)
2665 				goto exit;
2666 	}
2667 
2668 exit:
2669 	return idx;
2670 }
2671 
2672 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2673 					 struct wmi_ch_info_ev_arg *arg)
2674 {
2675 	struct wmi_chan_info_event *ev = (void *)skb->data;
2676 
2677 	if (skb->len < sizeof(*ev))
2678 		return -EPROTO;
2679 
2680 	skb_pull(skb, sizeof(*ev));
2681 	arg->err_code = ev->err_code;
2682 	arg->freq = ev->freq;
2683 	arg->cmd_flags = ev->cmd_flags;
2684 	arg->noise_floor = ev->noise_floor;
2685 	arg->rx_clear_count = ev->rx_clear_count;
2686 	arg->cycle_count = ev->cycle_count;
2687 
2688 	return 0;
2689 }
2690 
2691 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2692 					      struct sk_buff *skb,
2693 					      struct wmi_ch_info_ev_arg *arg)
2694 {
2695 	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2696 
2697 	if (skb->len < sizeof(*ev))
2698 		return -EPROTO;
2699 
2700 	skb_pull(skb, sizeof(*ev));
2701 	arg->err_code = ev->err_code;
2702 	arg->freq = ev->freq;
2703 	arg->cmd_flags = ev->cmd_flags;
2704 	arg->noise_floor = ev->noise_floor;
2705 	arg->rx_clear_count = ev->rx_clear_count;
2706 	arg->cycle_count = ev->cycle_count;
2707 	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2708 	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2709 	arg->rx_frame_count = ev->rx_frame_count;
2710 
2711 	return 0;
2712 }
2713 
2714 /*
2715  * Handle the channel info event for firmware which only sends one
2716  * chan_info event per scanned channel.
2717  */
2718 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2719 						struct chan_info_params *params)
2720 {
2721 	struct survey_info *survey;
2722 	int idx;
2723 
2724 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2725 		ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2726 		return;
2727 	}
2728 
2729 	idx = freq_to_idx(ar, params->freq);
2730 	if (idx >= ARRAY_SIZE(ar->survey)) {
2731 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2732 			    params->freq, idx);
2733 		return;
2734 	}
2735 
2736 	survey = &ar->survey[idx];
2737 
2738 	if (!params->mac_clk_mhz)
2739 		return;
2740 
2741 	memset(survey, 0, sizeof(*survey));
2742 
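	/* cycle_count and rx_clear_count are MAC clock ticks; dividing by
	 * the clock rate in MHz yields microseconds, and by a further 1000
	 * the milliseconds expected by the survey API.
	 */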
2743 	survey->noise = params->noise_floor;
2744 	survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2745 	survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2746 	survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2747 			  SURVEY_INFO_TIME_BUSY;
2748 }
2749 
2750 /*
2751  * Handle the channel info event for firmware which sends chan_info
2752  * event in pairs(start and stop events) for every scanned channel.
2753  */
2754 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2755 					      struct chan_info_params *params)
2756 {
2757 	struct survey_info *survey;
2758 	int idx;
2759 
2760 	idx = freq_to_idx(ar, params->freq);
2761 	if (idx >= ARRAY_SIZE(ar->survey)) {
2762 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2763 			    params->freq, idx);
2764 		return;
2765 	}
2766 
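	/* The stop event of a start/stop pair carries the COMPLETE flag;
	 * fill the survey from the delta against the counters saved on the
	 * preceding event.
	 */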
2767 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2768 		if (ar->ch_info_can_report_survey) {
2769 			survey = &ar->survey[idx];
2770 			survey->noise = params->noise_floor;
2771 			survey->filled = SURVEY_INFO_NOISE_DBM;
2772 
2773 			ath10k_hw_fill_survey_time(ar,
2774 						   survey,
2775 						   params->cycle_count,
2776 						   params->rx_clear_count,
2777 						   ar->survey_last_cycle_count,
2778 						   ar->survey_last_rx_clear_count);
2779 		}
2780 
2781 		ar->ch_info_can_report_survey = false;
2782 	} else {
2783 		ar->ch_info_can_report_survey = true;
2784 	}
2785 
2786 	if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2787 		ar->survey_last_rx_clear_count = params->rx_clear_count;
2788 		ar->survey_last_cycle_count = params->cycle_count;
2789 	}
2790 }
2791 
2792 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2793 {
2794 	struct chan_info_params ch_info_param;
2795 	struct wmi_ch_info_ev_arg arg = {};
2796 	int ret;
2797 
2798 	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2799 	if (ret) {
2800 		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2801 		return;
2802 	}
2803 
2804 	ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2805 	ch_info_param.freq = __le32_to_cpu(arg.freq);
2806 	ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2807 	ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2808 	ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2809 	ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2810 	ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2811 
2812 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2813 		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2814 		   ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2815 		   ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2816 		   ch_info_param.cycle_count);
2817 
2818 	spin_lock_bh(&ar->data_lock);
2819 
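	/* Channel info is only meaningful while a scan is in progress;
	 * ignore events that arrive outside of one.
	 */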
2820 	switch (ar->scan.state) {
2821 	case ATH10K_SCAN_IDLE:
2822 	case ATH10K_SCAN_STARTING:
2823 		ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
2824 		goto exit;
2825 	case ATH10K_SCAN_RUNNING:
2826 	case ATH10K_SCAN_ABORTING:
2827 		break;
2828 	}
2829 
2830 	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2831 		     ar->running_fw->fw_file.fw_features))
2832 		ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2833 	else
2834 		ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2835 
2836 exit:
2837 	spin_unlock_bh(&ar->data_lock);
2838 }
2839 
2840 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2841 {
2842 	struct wmi_echo_ev_arg arg = {};
2843 	int ret;
2844 
2845 	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2846 	if (ret) {
2847 		ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2848 		return;
2849 	}
2850 
2851 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2852 		   "wmi event echo value 0x%08x\n",
2853 		   le32_to_cpu(arg.value));
2854 
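	/* An echo carrying the driver's barrier cookie completes a pending
	 * WMI barrier wait.
	 */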
2855 	if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2856 		complete(&ar->wmi.barrier);
2857 }
2858 
2859 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2860 {
2861 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2862 		   skb->len);
2863 
2864 	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2865 
2866 	return 0;
2867 }
2868 
2869 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2870 				     struct ath10k_fw_stats_pdev *dst)
2871 {
2872 	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2873 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2874 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2875 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2876 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
2877 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2878 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2879 }
2880 
2881 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2882 				   struct ath10k_fw_stats_pdev *dst)
2883 {
2884 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2885 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2886 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2887 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2888 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2889 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2890 	dst->local_freed = __le32_to_cpu(src->local_freed);
2891 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2892 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2893 	dst->underrun = __le32_to_cpu(src->underrun);
2894 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2895 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2896 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2897 	dst->data_rc = __le32_to_cpu(src->data_rc);
2898 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2899 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2900 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2901 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2902 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2903 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2904 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2905 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2906 }
2907 
2908 static void
2909 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2910 				   struct ath10k_fw_stats_pdev *dst)
2911 {
2912 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2913 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2914 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2915 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2916 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2917 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2918 	dst->local_freed = __le32_to_cpu(src->local_freed);
2919 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2920 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2921 	dst->underrun = __le32_to_cpu(src->underrun);
2922 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2923 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2924 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2925 	dst->data_rc = __le32_to_cpu(src->data_rc);
2926 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2927 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2928 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2929 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2930 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2931 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2932 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2933 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2934 	dst->hw_paused = __le32_to_cpu(src->hw_paused);
2935 	dst->seq_posted = __le32_to_cpu(src->seq_posted);
2936 	dst->seq_failed_queueing =
2937 		__le32_to_cpu(src->seq_failed_queueing);
2938 	dst->seq_completed = __le32_to_cpu(src->seq_completed);
2939 	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2940 	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2941 	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2942 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2943 	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2944 	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2945 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2946 	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2947 }
2948 
2949 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2950 				   struct ath10k_fw_stats_pdev *dst)
2951 {
2952 	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2953 	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2954 	dst->r0_frags = __le32_to_cpu(src->r0_frags);
2955 	dst->r1_frags = __le32_to_cpu(src->r1_frags);
2956 	dst->r2_frags = __le32_to_cpu(src->r2_frags);
2957 	dst->r3_frags = __le32_to_cpu(src->r3_frags);
2958 	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2959 	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2960 	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2961 	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2962 	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2963 	dst->phy_errs = __le32_to_cpu(src->phy_errs);
2964 	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2965 	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2966 }
2967 
2968 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2969 				      struct ath10k_fw_stats_pdev *dst)
2970 {
2971 	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2972 	dst->rts_bad = __le32_to_cpu(src->rts_bad);
2973 	dst->rts_good = __le32_to_cpu(src->rts_good);
2974 	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2975 	dst->no_beacons = __le32_to_cpu(src->no_beacons);
2976 	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2977 }
2978 
2979 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2980 				struct ath10k_fw_stats_peer *dst)
2981 {
2982 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2983 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2984 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2985 }
2986 
2987 static void
2988 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2989 				struct ath10k_fw_stats_peer *dst)
2990 {
2991 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2992 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2993 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2994 	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2995 }
2996 
2997 static void
2998 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
2999 				struct ath10k_fw_stats_vdev_extd *dst)
3000 {
3001 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
3002 	dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
3003 	dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
3004 	dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
3005 	dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
3006 	dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
3007 	dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
3008 	dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
3009 	dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
3010 	dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
3011 	dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
3012 	dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
3013 	dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
3014 	dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
3015 	dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
3016 	dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
3017 }
3018 
3019 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
3020 					    struct sk_buff *skb,
3021 					    struct ath10k_fw_stats *stats)
3022 {
3023 	const struct wmi_stats_event *ev = (void *)skb->data;
3024 	u32 num_pdev_stats, num_peer_stats;
3025 	int i;
3026 
3027 	if (!skb_pull(skb, sizeof(*ev)))
3028 		return -EPROTO;
3029 
3030 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3031 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3032 
3033 	for (i = 0; i < num_pdev_stats; i++) {
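	/* Convert each firmware record to host byte order and queue it on
	 * the caller-provided lists; records that fail to allocate are
	 * silently skipped.
	 */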
3034 		const struct wmi_pdev_stats *src;
3035 		struct ath10k_fw_stats_pdev *dst;
3036 
3037 		src = (void *)skb->data;
3038 		if (!skb_pull(skb, sizeof(*src)))
3039 			return -EPROTO;
3040 
3041 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3042 		if (!dst)
3043 			continue;
3044 
3045 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3046 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3047 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3048 
3049 		list_add_tail(&dst->list, &stats->pdevs);
3050 	}
3051 
3052 	/* fw doesn't implement vdev stats */
3053 
3054 	for (i = 0; i < num_peer_stats; i++) {
3055 		const struct wmi_peer_stats *src;
3056 		struct ath10k_fw_stats_peer *dst;
3057 
3058 		src = (void *)skb->data;
3059 		if (!skb_pull(skb, sizeof(*src)))
3060 			return -EPROTO;
3061 
3062 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3063 		if (!dst)
3064 			continue;
3065 
3066 		ath10k_wmi_pull_peer_stats(src, dst);
3067 		list_add_tail(&dst->list, &stats->peers);
3068 	}
3069 
3070 	return 0;
3071 }
3072 
3073 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3074 					   struct sk_buff *skb,
3075 					   struct ath10k_fw_stats *stats)
3076 {
3077 	const struct wmi_stats_event *ev = (void *)skb->data;
3078 	u32 num_pdev_stats, num_peer_stats;
3079 	int i;
3080 
3081 	if (!skb_pull(skb, sizeof(*ev)))
3082 		return -EPROTO;
3083 
3084 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3085 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3086 
3087 	for (i = 0; i < num_pdev_stats; i++) {
3088 		const struct wmi_10x_pdev_stats *src;
3089 		struct ath10k_fw_stats_pdev *dst;
3090 
3091 		src = (void *)skb->data;
3092 		if (!skb_pull(skb, sizeof(*src)))
3093 			return -EPROTO;
3094 
3095 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3096 		if (!dst)
3097 			continue;
3098 
3099 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3100 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3101 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3102 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3103 
3104 		list_add_tail(&dst->list, &stats->pdevs);
3105 	}
3106 
3107 	/* fw doesn't implement vdev stats */
3108 
3109 	for (i = 0; i < num_peer_stats; i++) {
3110 		const struct wmi_10x_peer_stats *src;
3111 		struct ath10k_fw_stats_peer *dst;
3112 
3113 		src = (void *)skb->data;
3114 		if (!skb_pull(skb, sizeof(*src)))
3115 			return -EPROTO;
3116 
3117 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3118 		if (!dst)
3119 			continue;
3120 
3121 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3122 
3123 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3124 
3125 		list_add_tail(&dst->list, &stats->peers);
3126 	}
3127 
3128 	return 0;
3129 }
3130 
3131 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3132 					    struct sk_buff *skb,
3133 					    struct ath10k_fw_stats *stats)
3134 {
3135 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3136 	u32 num_pdev_stats;
3137 	u32 num_pdev_ext_stats;
3138 	u32 num_peer_stats;
3139 	int i;
3140 
3141 	if (!skb_pull(skb, sizeof(*ev)))
3142 		return -EPROTO;
3143 
3144 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3145 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3146 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3147 
3148 	for (i = 0; i < num_pdev_stats; i++) {
3149 		const struct wmi_10_2_pdev_stats *src;
3150 		struct ath10k_fw_stats_pdev *dst;
3151 
3152 		src = (void *)skb->data;
3153 		if (!skb_pull(skb, sizeof(*src)))
3154 			return -EPROTO;
3155 
3156 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3157 		if (!dst)
3158 			continue;
3159 
3160 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3161 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3162 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3163 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3164 		/* FIXME: expose 10.2 specific values */
3165 
3166 		list_add_tail(&dst->list, &stats->pdevs);
3167 	}
3168 
3169 	for (i = 0; i < num_pdev_ext_stats; i++) {
3170 		const struct wmi_10_2_pdev_ext_stats *src;
3171 
3172 		src = (void *)skb->data;
3173 		if (!skb_pull(skb, sizeof(*src)))
3174 			return -EPROTO;
3175 
3176 		/* FIXME: expose values to userspace
3177 		 *
3178 		 * Note: Even though this loop seems to do nothing it is
3179 		 * required to parse following sub-structures properly.
3180 		 */
3181 	}
3182 
3183 	/* fw doesn't implement vdev stats */
3184 
3185 	for (i = 0; i < num_peer_stats; i++) {
3186 		const struct wmi_10_2_peer_stats *src;
3187 		struct ath10k_fw_stats_peer *dst;
3188 
3189 		src = (void *)skb->data;
3190 		if (!skb_pull(skb, sizeof(*src)))
3191 			return -EPROTO;
3192 
3193 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3194 		if (!dst)
3195 			continue;
3196 
3197 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3198 
3199 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3200 		/* FIXME: expose 10.2 specific values */
3201 
3202 		list_add_tail(&dst->list, &stats->peers);
3203 	}
3204 
3205 	return 0;
3206 }
3207 
3208 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3209 					      struct sk_buff *skb,
3210 					      struct ath10k_fw_stats *stats)
3211 {
3212 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3213 	u32 num_pdev_stats;
3214 	u32 num_pdev_ext_stats;
3215 	u32 num_peer_stats;
3216 	int i;
3217 
3218 	if (!skb_pull(skb, sizeof(*ev)))
3219 		return -EPROTO;
3220 
3221 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3222 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3223 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3224 
3225 	for (i = 0; i < num_pdev_stats; i++) {
3226 		const struct wmi_10_2_pdev_stats *src;
3227 		struct ath10k_fw_stats_pdev *dst;
3228 
3229 		src = (void *)skb->data;
3230 		if (!skb_pull(skb, sizeof(*src)))
3231 			return -EPROTO;
3232 
3233 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3234 		if (!dst)
3235 			continue;
3236 
3237 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3238 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3239 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3240 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3241 		/* FIXME: expose 10.2 specific values */
3242 
3243 		list_add_tail(&dst->list, &stats->pdevs);
3244 	}
3245 
3246 	for (i = 0; i < num_pdev_ext_stats; i++) {
3247 		const struct wmi_10_2_pdev_ext_stats *src;
3248 
3249 		src = (void *)skb->data;
3250 		if (!skb_pull(skb, sizeof(*src)))
3251 			return -EPROTO;
3252 
3253 		/* FIXME: expose values to userspace
3254 		 *
3255 		 * Note: Even though this loop seems to do nothing it is
3256 		 * required to parse following sub-structures properly.
3257 		 */
3258 	}
3259 
3260 	/* fw doesn't implement vdev stats */
3261 
3262 	for (i = 0; i < num_peer_stats; i++) {
3263 		const struct wmi_10_2_4_ext_peer_stats *src;
3264 		struct ath10k_fw_stats_peer *dst;
3265 		int stats_len;
3266 
3267 		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3268 			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3269 		else
3270 			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3271 
3272 		src = (void *)skb->data;
3273 		if (!skb_pull(skb, stats_len))
3274 			return -EPROTO;
3275 
3276 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3277 		if (!dst)
3278 			continue;
3279 
3280 		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3281 
3282 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3283 
3284 		if (ath10k_peer_stats_enabled(ar))
3285 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3286 		/* FIXME: expose 10.2 specific values */
3287 
3288 		list_add_tail(&dst->list, &stats->peers);
3289 	}
3290 
3291 	return 0;
3292 }
3293 
3294 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3295 					    struct sk_buff *skb,
3296 					    struct ath10k_fw_stats *stats)
3297 {
3298 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3299 	u32 num_pdev_stats;
3300 	u32 num_pdev_ext_stats;
3301 	u32 num_vdev_stats;
3302 	u32 num_peer_stats;
3303 	u32 num_bcnflt_stats;
3304 	u32 stats_id;
3305 	int i;
3306 
3307 	if (!skb_pull(skb, sizeof(*ev)))
3308 		return -EPROTO;
3309 
3310 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3311 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3312 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3313 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3314 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3315 	stats_id = __le32_to_cpu(ev->stats_id);
3316 
3317 	for (i = 0; i < num_pdev_stats; i++) {
3318 		const struct wmi_10_4_pdev_stats *src;
3319 		struct ath10k_fw_stats_pdev *dst;
3320 
3321 		src = (void *)skb->data;
3322 		if (!skb_pull(skb, sizeof(*src)))
3323 			return -EPROTO;
3324 
3325 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3326 		if (!dst)
3327 			continue;
3328 
3329 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3330 		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3331 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3332 		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3333 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3334 
3335 		list_add_tail(&dst->list, &stats->pdevs);
3336 	}
3337 
3338 	for (i = 0; i < num_pdev_ext_stats; i++) {
3339 		const struct wmi_10_2_pdev_ext_stats *src;
3340 
3341 		src = (void *)skb->data;
3342 		if (!skb_pull(skb, sizeof(*src)))
3343 			return -EPROTO;
3344 
3345 		/* FIXME: expose values to userspace
3346 		 *
3347 		 * Note: Even though this loop seems to do nothing it is
3348 		 * required to parse following sub-structures properly.
3349 		 */
3350 	}
3351 
3352 	for (i = 0; i < num_vdev_stats; i++) {
3353 		const struct wmi_vdev_stats *src;
3354 
3355 		/* Ignore vdev stats here as it has only vdev id. Actual vdev
3356 		 * stats will be retrieved from vdev extended stats.
3357 		 */
3358 		src = (void *)skb->data;
3359 		if (!skb_pull(skb, sizeof(*src)))
3360 			return -EPROTO;
3361 	}
3362 
3363 	for (i = 0; i < num_peer_stats; i++) {
3364 		const struct wmi_10_4_peer_stats *src;
3365 		struct ath10k_fw_stats_peer *dst;
3366 
3367 		src = (void *)skb->data;
3368 		if (!skb_pull(skb, sizeof(*src)))
3369 			return -EPROTO;
3370 
3371 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3372 		if (!dst)
3373 			continue;
3374 
3375 		ath10k_wmi_10_4_pull_peer_stats(src, dst);
3376 		list_add_tail(&dst->list, &stats->peers);
3377 	}
3378 
3379 	for (i = 0; i < num_bcnflt_stats; i++) {
3380 		const struct wmi_10_4_bss_bcn_filter_stats *src;
3381 
3382 		src = (void *)skb->data;
3383 		if (!skb_pull(skb, sizeof(*src)))
3384 			return -EPROTO;
3385 
3386 		/* FIXME: expose values to userspace
3387 		 *
3388 		 * Note: Even though this loop seems to do nothing it is
3389 		 * required to parse following sub-structures properly.
3390 		 */
3391 	}
3392 
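	/* Extended peer and vdev stats, when advertised in stats_id, follow
	 * the base records in the same event buffer.
	 */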
3393 	if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3394 		stats->extended = true;
3395 
3396 		for (i = 0; i < num_peer_stats; i++) {
3397 			const struct wmi_10_4_peer_extd_stats *src;
3398 			struct ath10k_fw_extd_stats_peer *dst;
3399 
3400 			src = (void *)skb->data;
3401 			if (!skb_pull(skb, sizeof(*src)))
3402 				return -EPROTO;
3403 
3404 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3405 			if (!dst)
3406 				continue;
3407 
3408 			ether_addr_copy(dst->peer_macaddr,
3409 					src->peer_macaddr.addr);
3410 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3411 			list_add_tail(&dst->list, &stats->peers_extd);
3412 		}
3413 	}
3414 
3415 	if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3416 		for (i = 0; i < num_vdev_stats; i++) {
3417 			const struct wmi_vdev_stats_extd *src;
3418 			struct ath10k_fw_stats_vdev_extd *dst;
3419 
3420 			src = (void *)skb->data;
3421 			if (!skb_pull(skb, sizeof(*src)))
3422 				return -EPROTO;
3423 
3424 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3425 			if (!dst)
3426 				continue;
3427 			ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3428 			list_add_tail(&dst->list, &stats->vdevs);
3429 		}
3430 	}
3431 
3432 	return 0;
3433 }
3434 
3435 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3436 {
3437 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3438 	ath10k_debug_fw_stats_process(ar, skb);
3439 }
3440 
3441 static int
3442 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3443 				 struct wmi_vdev_start_ev_arg *arg)
3444 {
3445 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3446 
3447 	if (skb->len < sizeof(*ev))
3448 		return -EPROTO;
3449 
3450 	skb_pull(skb, sizeof(*ev));
3451 	arg->vdev_id = ev->vdev_id;
3452 	arg->req_id = ev->req_id;
3453 	arg->resp_type = ev->resp_type;
3454 	arg->status = ev->status;
3455 
3456 	return 0;
3457 }
3458 
3459 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3460 {
3461 	struct wmi_vdev_start_ev_arg arg = {};
3462 	int ret;
3463 	u32 status;
3464 
3465 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3466 
3467 	ar->last_wmi_vdev_start_status = 0;
3468 
3469 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3470 	if (ret) {
3471 		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3472 		ar->last_wmi_vdev_start_status = ret;
3473 		goto out;
3474 	}
3475 
3476 	status = __le32_to_cpu(arg.status);
3477 	if (WARN_ON_ONCE(status)) {
3478 		ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3479 			    status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3480 			    "chan-invalid" : "unknown");
3481 		/* Setup is done one way or another though, so we should still
3482 		 * do the completion, so don't return here.
3483 		 */
3484 		ar->last_wmi_vdev_start_status = -EINVAL;
3485 	}
3486 
3487 out:
3488 	complete(&ar->vdev_setup_done);
3489 }
3490 
3491 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3492 {
3493 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3494 	complete(&ar->vdev_setup_done);
3495 }
3496 
3497 static int
3498 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3499 				struct wmi_peer_kick_ev_arg *arg)
3500 {
3501 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3502 
3503 	if (skb->len < sizeof(*ev))
3504 		return -EPROTO;
3505 
3506 	skb_pull(skb, sizeof(*ev));
3507 	arg->mac_addr = ev->peer_macaddr.addr;
3508 
3509 	return 0;
3510 }
3511 
3512 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3513 {
3514 	struct wmi_peer_kick_ev_arg arg = {};
3515 	struct ieee80211_sta *sta;
3516 	int ret;
3517 
3518 	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3519 	if (ret) {
3520 		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3521 			    ret);
3522 		return;
3523 	}
3524 
3525 	ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
3526 		   arg.mac_addr);
3527 
3528 	rcu_read_lock();
3529 
3530 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3531 	if (!sta) {
3532 		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3533 			    arg.mac_addr);
3534 		goto exit;
3535 	}
3536 
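	/* Report the kickout to mac80211 as a low-ACK condition for this
	 * station.
	 */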
3537 	ieee80211_report_low_ack(sta, 10);
3538 
3539 exit:
3540 	rcu_read_unlock();
3541 }
3542 
3543 /*
3544  * FIXME
3545  *
3546  * We don't report to mac80211 sleep state of connected
3547  * stations. Due to this mac80211 can't fill in TIM IE
3548  * correctly.
3549  *
3550  * I know of no way of getting nullfunc frames that contain
3551  * sleep transition from connected stations - these do not
3552  * seem to be sent from the target to the host. There also
3553  * doesn't seem to be a dedicated event for that. So the
3554  * only way left to do this would be to read tim_bitmap
3555  * during SWBA.
3556  *
3557  * We could probably try using tim_bitmap from SWBA to tell
3558  * mac80211 which stations are asleep and which are not. The
3559  * problem here is calling mac80211 functions so many times
3560  * could take too long and make us miss the time to submit
3561  * the beacon to the target.
3562  *
3563  * So as a workaround we try to extend the TIM IE if there
3564  * is unicast buffered for stations with aid > 7 and fill it
3565  * in ourselves.
3566  */
3567 static void ath10k_wmi_update_tim(struct ath10k *ar,
3568 				  struct ath10k_vif *arvif,
3569 				  struct sk_buff *bcn,
3570 				  const struct wmi_tim_info_arg *tim_info)
3571 {
3572 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3573 	struct ieee80211_tim_ie *tim;
3574 	u8 *ies, *ie;
3575 	u8 ie_len, pvm_len;
3576 	__le32 t;
3577 	u32 v, tim_len;
3578 
3579 	/* When FW reports 0 in tim_len, ensure at least first byte
3580 	 * in tim_bitmap is considered for pvm calculation.
3581 	 */
3582 	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3583 
3584 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
3585 	 * we must copy the bitmap upon change and reuse it later
3586 	 */
3587 	if (__le32_to_cpu(tim_info->tim_changed)) {
3588 		int i;
3589 
3590 		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3591 			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3592 				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
3593 			tim_len = sizeof(arvif->u.ap.tim_bitmap);
3594 		}
3595 
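		/* tim_bitmap is an array of __le32 words; unpack it byte by
		 * byte into the cached host-order bitmap.
		 */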
3596 		for (i = 0; i < tim_len; i++) {
3597 			t = tim_info->tim_bitmap[i / 4];
3598 			v = __le32_to_cpu(t);
3599 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3600 		}
3601 
3602 		/* FW reports either length 0 or length based on max supported
3603 		 * station. so we calculate this on our own
3604 		 */
3605 		arvif->u.ap.tim_len = 0;
3606 		for (i = 0; i < tim_len; i++)
3607 			if (arvif->u.ap.tim_bitmap[i])
3608 				arvif->u.ap.tim_len = i;
3609 
3610 		arvif->u.ap.tim_len++;
3611 	}
3612 
3613 	ies = bcn->data;
3614 	ies += ieee80211_hdrlen(hdr->frame_control);
3615 	ies += 12; /* fixed parameters */
3616 
3617 	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3618 				    (u8 *)skb_tail_pointer(bcn) - ies);
3619 	if (!ie) {
3620 		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3621 			ath10k_warn(ar, "no tim ie found;\n");
3622 		return;
3623 	}
3624 
3625 	tim = (void *)ie + 2;
3626 	ie_len = ie[1];
3627 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3628 
3629 	if (pvm_len < arvif->u.ap.tim_len) {
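	/* The beacon template from mac80211 may carry a shorter partial
	 * virtual bitmap than needed; grow the TIM IE in place, moving any
	 * following IEs towards the tail.
	 */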
3630 		int expand_size = tim_len - pvm_len;
3631 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3632 		void *next_ie = ie + 2 + ie_len;
3633 
3634 		if (skb_put(bcn, expand_size)) {
3635 			memmove(next_ie + expand_size, next_ie, move_size);
3636 
3637 			ie[1] += expand_size;
3638 			ie_len += expand_size;
3639 			pvm_len += expand_size;
3640 		} else {
3641 			ath10k_warn(ar, "tim expansion failed\n");
3642 		}
3643 	}
3644 
3645 	if (pvm_len > tim_len) {
3646 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3647 		return;
3648 	}
3649 
3650 	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3651 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3652 
3653 	if (tim->dtim_count == 0) {
3654 		ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3655 
3656 		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3657 			ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3658 	}
3659 
3660 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3661 		   tim->dtim_count, tim->dtim_period,
3662 		   tim->bitmap_ctrl, pvm_len);
3663 }
3664 
3665 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3666 				  struct sk_buff *bcn,
3667 				  const struct wmi_p2p_noa_info *noa)
3668 {
3669 	if (!arvif->vif->p2p)
3670 		return;
3671 
3672 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3673 
3674 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3675 		ath10k_p2p_noa_update(arvif, noa);
3676 
3677 	if (arvif->u.ap.noa_data)
3678 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3679 			skb_put_data(bcn, arvif->u.ap.noa_data,
3680 				     arvif->u.ap.noa_len);
3681 }
3682 
3683 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3684 				      struct wmi_swba_ev_arg *arg)
3685 {
3686 	struct wmi_host_swba_event *ev = (void *)skb->data;
3687 	u32 map;
3688 	size_t i;
3689 
3690 	if (skb->len < sizeof(*ev))
3691 		return -EPROTO;
3692 
3693 	skb_pull(skb, sizeof(*ev));
3694 	arg->vdev_map = ev->vdev_map;
3695 
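	/* Each set bit in vdev_map corresponds to one bcn_info entry; walk
	 * the map and copy out the per-vdev TIM and NoA pointers.
	 */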
3696 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3697 		if (!(map & BIT(0)))
3698 			continue;
3699 
3700 		/* If this happens there were some changes in firmware and
3701 		 * ath10k should update the max size of tim_info array.
3702 		 */
3703 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3704 			break;
3705 
3706 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3707 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3708 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3709 			return -EPROTO;
3710 		}
3711 
3712 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3713 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3714 		arg->tim_info[i].tim_bitmap =
3715 				ev->bcn_info[i].tim_info.tim_bitmap;
3716 		arg->tim_info[i].tim_changed =
3717 				ev->bcn_info[i].tim_info.tim_changed;
3718 		arg->tim_info[i].tim_num_ps_pending =
3719 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3720 
3721 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3722 		i++;
3723 	}
3724 
3725 	return 0;
3726 }
3727 
3728 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3729 					     struct sk_buff *skb,
3730 					     struct wmi_swba_ev_arg *arg)
3731 {
3732 	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3733 	u32 map;
3734 	size_t i;
3735 
3736 	if (skb->len < sizeof(*ev))
3737 		return -EPROTO;
3738 
3739 	skb_pull(skb, sizeof(*ev));
3740 	arg->vdev_map = ev->vdev_map;
3741 
3742 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3743 		if (!(map & BIT(0)))
3744 			continue;
3745 
3746 		/* If this happens there were some changes in firmware and
3747 		 * ath10k should update the max size of tim_info array.
3748 		 */
3749 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3750 			break;
3751 
3752 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3753 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3754 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3755 			return -EPROTO;
3756 		}
3757 
3758 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3759 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3760 		arg->tim_info[i].tim_bitmap =
3761 				ev->bcn_info[i].tim_info.tim_bitmap;
3762 		arg->tim_info[i].tim_changed =
3763 				ev->bcn_info[i].tim_info.tim_changed;
3764 		arg->tim_info[i].tim_num_ps_pending =
3765 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3766 		i++;
3767 	}
3768 
3769 	return 0;
3770 }
3771 
3772 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3773 					   struct sk_buff *skb,
3774 					   struct wmi_swba_ev_arg *arg)
3775 {
3776 	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3777 	u32 map, tim_len;
3778 	size_t i;
3779 
3780 	if (skb->len < sizeof(*ev))
3781 		return -EPROTO;
3782 
3783 	skb_pull(skb, sizeof(*ev));
3784 	arg->vdev_map = ev->vdev_map;
3785 
3786 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3787 		if (!(map & BIT(0)))
3788 			continue;
3789 
3790 		/* If this happens there were some changes in firmware and
3791 		 * ath10k should update the max size of tim_info array.
3792 		 */
3793 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3794 			break;
3795 
3796 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3797 		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3798 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3799 			return -EPROTO;
3800 		}
3801 
3802 		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3803 		if (tim_len) {
3804 			/* Exclude 4 byte guard length */
3805 			tim_len -= 4;
3806 			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3807 		} else {
3808 			arg->tim_info[i].tim_len = 0;
3809 		}
3810 
3811 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3812 		arg->tim_info[i].tim_bitmap =
3813 				ev->bcn_info[i].tim_info.tim_bitmap;
3814 		arg->tim_info[i].tim_changed =
3815 				ev->bcn_info[i].tim_info.tim_changed;
3816 		arg->tim_info[i].tim_num_ps_pending =
3817 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3818 
3819 		/* 10.4 firmware doesn't have p2p support. notice of absence
3820 		 * info can be ignored for now.
3821 		 */
3822 
3823 		i++;
3824 	}
3825 
3826 	return 0;
3827 }
3828 
3829 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3830 {
3831 	return WMI_TXBF_CONF_BEFORE_ASSOC;
3832 }
3833 
3834 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3835 {
3836 	struct wmi_swba_ev_arg arg = {};
3837 	u32 map;
3838 	int i = -1;
3839 	const struct wmi_tim_info_arg *tim_info;
3840 	const struct wmi_p2p_noa_info *noa_info;
3841 	struct ath10k_vif *arvif;
3842 	struct sk_buff *bcn;
3843 	dma_addr_t paddr;
3844 	int ret, vdev_id = 0;
3845 
3846 	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3847 	if (ret) {
3848 		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3849 		return;
3850 	}
3851 
3852 	map = __le32_to_cpu(arg.vdev_map);
3853 
3854 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3855 		   map);
3856 
3857 	for (; map; map >>= 1, vdev_id++) {
3858 		if (!(map & 0x1))
3859 			continue;
3860 
3861 		i++;
3862 
3863 		if (i >= WMI_MAX_AP_VDEV) {
3864 			ath10k_warn(ar, "swba has corrupted vdev map\n");
3865 			break;
3866 		}
3867 
3868 		tim_info = &arg.tim_info[i];
3869 		noa_info = arg.noa_info[i];
3870 
3871 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
3872 			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3873 			   i,
3874 			   __le32_to_cpu(tim_info->tim_len),
3875 			   __le32_to_cpu(tim_info->tim_mcast),
3876 			   __le32_to_cpu(tim_info->tim_changed),
3877 			   __le32_to_cpu(tim_info->tim_num_ps_pending),
3878 			   __le32_to_cpu(tim_info->tim_bitmap[3]),
3879 			   __le32_to_cpu(tim_info->tim_bitmap[2]),
3880 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
3881 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
3882 
3883 		/* TODO: Only the first 4 words of tim_bitmap are dumped.
3884 		 * Extend debug code to dump full tim_bitmap.
3885 		 */
3886 
3887 		arvif = ath10k_get_arvif(ar, vdev_id);
3888 		if (arvif == NULL) {
3889 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
3890 				    vdev_id);
3891 			continue;
3892 		}
3893 
3894 		/* mac80211 would have already asked us to stop beaconing and
3895 		 * bring the vdev down, so continue in that case
3896 		 */
3897 		if (!arvif->is_up)
3898 			continue;
3899 
3900 		/* There are no completions for beacons so wait for next SWBA
3901 		 * before telling mac80211 to decrement CSA counter
3902 		 *
3903 		 * Once CSA counter is completed stop sending beacons until
3904 		 * actual channel switch is done
3905 		 */
3906 		if (arvif->vif->bss_conf.csa_active &&
3907 		    ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
3908 			ieee80211_csa_finish(arvif->vif, 0);
3909 			continue;
3910 		}
3911 
3912 		bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
3913 		if (!bcn) {
3914 			ath10k_warn(ar, "could not get mac80211 beacon\n");
3915 			continue;
3916 		}
3917 
3918 		ath10k_tx_h_seq_no(arvif->vif, bcn);
3919 		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3920 		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3921 
3922 		spin_lock_bh(&ar->data_lock);
3923 
3924 		if (arvif->beacon) {
3925 			switch (arvif->beacon_state) {
3926 			case ATH10K_BEACON_SENT:
3927 				break;
3928 			case ATH10K_BEACON_SCHEDULED:
3929 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3930 					    arvif->vdev_id);
3931 				break;
3932 			case ATH10K_BEACON_SENDING:
3933 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3934 					    arvif->vdev_id);
3935 				dev_kfree_skb(bcn);
3936 				goto skip;
3937 			}
3938 
3939 			ath10k_mac_vif_beacon_free(arvif);
3940 		}
3941 
3942 		if (!arvif->beacon_buf) {
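		/* Either DMA-map the freshly built beacon or, when the vdev
		 * has a preallocated beacon buffer, copy the frame into it.
		 */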
3943 			paddr = dma_map_single(arvif->ar->dev, bcn->data,
3944 					       bcn->len, DMA_TO_DEVICE);
3945 			ret = dma_mapping_error(arvif->ar->dev, paddr);
3946 			if (ret) {
3947 				ath10k_warn(ar, "failed to map beacon: %d\n",
3948 					    ret);
3949 				dev_kfree_skb_any(bcn);
3950 				goto skip;
3951 			}
3952 
3953 			ATH10K_SKB_CB(bcn)->paddr = paddr;
3954 		} else {
3955 			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3956 				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3957 					    bcn->len, IEEE80211_MAX_FRAME_LEN);
3958 				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3959 			}
3960 			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3961 			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3962 		}
3963 
3964 		arvif->beacon = bcn;
3965 		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3966 
3967 		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3968 		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3969 
3970 skip:
3971 		spin_unlock_bh(&ar->data_lock);
3972 	}
3973 
3974 	ath10k_wmi_tx_beacons_nowait(ar);
3975 }
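/* Example of the SWBA vdev_map walk above: a vdev_map of 0x5 makes the loop
 * visit vdev_id 0 and vdev_id 2 (bit 1 is clear, so vdev_id 1 is skipped),
 * while i counts only the set bits so it stays a valid index into
 * arg.tim_info[]/arg.noa_info[], bounded by WMI_MAX_AP_VDEV.
 */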
3976 
3977 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3978 {
3979 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3980 }
3981 
3982 static void ath10k_radar_detected(struct ath10k *ar)
3983 {
3984 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3985 	ATH10K_DFS_STAT_INC(ar, radar_detected);
3986 
3987 	/* Control radar events reporting in debugfs file
3988 	 * dfs_block_radar_events
3989 	 */
3990 	if (ar->dfs_block_radar_events)
3991 		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3992 	else
3993 		ieee80211_radar_detected(ar->hw, NULL);
3994 }
3995 
3996 static void ath10k_radar_confirmation_work(struct work_struct *work)
3997 {
3998 	struct ath10k *ar = container_of(work, struct ath10k,
3999 					 radar_confirmation_work);
4000 	struct ath10k_radar_found_info radar_info;
4001 	int ret, time_left;
4002 
4003 	reinit_completion(&ar->wmi.radar_confirm);
4004 
4005 	spin_lock_bh(&ar->data_lock);
4006 	memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
4007 	spin_unlock_bh(&ar->data_lock);
4008 
4009 	ret = ath10k_wmi_report_radar_found(ar, &radar_info);
4010 	if (ret) {
4011 		ath10k_warn(ar, "failed to send radar found %d\n", ret);
4012 		goto wait_complete;
4013 	}
4014 
4015 	time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
4016 						ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
4017 	if (time_left) {
4018 		/* DFS Confirmation status event received and
4019 		 * necessary action completed.
4020 		 */
4021 		goto wait_complete;
4022 	} else {
4023 		/* DFS Confirmation event not received from FW. Considering this
4024 		 * as a real radar.
4025 		 */
4026 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4027 			   "dfs confirmation not received from fw, considering as radar\n");
4028 		goto radar_detected;
4029 	}
4030 
4031 radar_detected:
4032 	ath10k_radar_detected(ar);
4033 
4034 	/* Reset state to allow sending confirmation on consecutive radar
4035 	 * detections, unless radar confirmation is disabled/stopped.
4036 	 */
4037 wait_complete:
4038 	spin_lock_bh(&ar->data_lock);
4039 	if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
4040 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4041 	spin_unlock_bh(&ar->data_lock);
4042 }
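/* Note on the confirmation flow above: the work item forwards the last cached
 * radar parameters to firmware via ath10k_wmi_report_radar_found() and then
 * waits on ar->wmi.radar_confirm, which is completed by
 * ath10k_wmi_event_dfs_status_check() when the DFS status event arrives.
 * If the wait times out, the pulse is treated as a real radar.
 */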
4043 
4044 static void ath10k_dfs_radar_report(struct ath10k *ar,
4045 				    struct wmi_phyerr_ev_arg *phyerr,
4046 				    const struct phyerr_radar_report *rr,
4047 				    u64 tsf)
4048 {
4049 	u32 reg0, reg1, tsf32l;
4050 	struct ieee80211_channel *ch;
4051 	struct pulse_event pe;
4052 	struct radar_detector_specs rs;
4053 	u64 tsf64;
4054 	u8 rssi, width;
4055 	struct ath10k_radar_found_info *radar_info;
4056 
4057 	reg0 = __le32_to_cpu(rr->reg0);
4058 	reg1 = __le32_to_cpu(rr->reg1);
4059 
4060 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4061 		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
4062 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
4063 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
4064 		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
4065 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
4066 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4067 		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
4068 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4069 		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4070 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4071 		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4072 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4073 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4074 		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4075 		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4076 		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4077 
4078 	if (!ar->dfs_detector)
4079 		return;
4080 
4081 	spin_lock_bh(&ar->data_lock);
4082 	ch = ar->rx_channel;
4083 
4084 	/* fetch target operating channel during channel change */
4085 	if (!ch)
4086 		ch = ar->tgt_oper_chan;
4087 
4088 	spin_unlock_bh(&ar->data_lock);
4089 
4090 	if (!ch) {
4091 		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4092 		goto radar_detected;
4093 	}
4094 
4095 	/* report event to DFS pattern detector */
4096 	tsf32l = phyerr->tsf_timestamp;
4097 	tsf64 = tsf & (~0xFFFFFFFFULL);
4098 	tsf64 |= tsf32l;
4099 
4100 	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4101 	rssi = phyerr->rssi_combined;
4102 
4103 	/* hardware stores this as an 8-bit signed value,
4104 	 * so clamp it to zero if it is negative
4105 	 */
4106 	if (rssi & 0x80)
4107 		rssi = 0;
4108 
4109 	pe.ts = tsf64;
4110 	pe.freq = ch->center_freq;
4111 	pe.width = width;
4112 	pe.rssi = rssi;
4113 	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4114 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4115 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4116 		   pe.freq, pe.width, pe.rssi, pe.ts);
4117 
4118 	ATH10K_DFS_STAT_INC(ar, pulses_detected);
4119 
4120 	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4121 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4122 			   "dfs no pulse pattern detected, yet\n");
4123 		return;
4124 	}
4125 
4126 	if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4127 	    ar->dfs_detector->region == NL80211_DFS_FCC) {
4128 		/* Consecutive radar indications need not be
4129 		 * sent to the firmware until we get confirmation
4130 		 * for the previously detected radar.
4131 		 */
4132 		spin_lock_bh(&ar->data_lock);
4133 		if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4134 			spin_unlock_bh(&ar->data_lock);
4135 			return;
4136 		}
4137 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4138 		radar_info = &ar->last_radar_info;
4139 
4140 		radar_info->pri_min = rs.pri_min;
4141 		radar_info->pri_max = rs.pri_max;
4142 		radar_info->width_min = rs.width_min;
4143 		radar_info->width_max = rs.width_max;
4144 		/* TODO: Find sidx_min and sidx_max */
4145 		radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4146 		radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4147 
4148 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4149 			   "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4150 			   radar_info->pri_min, radar_info->pri_max,
4151 			   radar_info->width_min, radar_info->width_max,
4152 			   radar_info->sidx_min, radar_info->sidx_max);
4153 		ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4154 		spin_unlock_bh(&ar->data_lock);
4155 		return;
4156 	}
4157 
4158 radar_detected:
4159 	ath10k_radar_detected(ar);
4160 }
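/* Worked example of the TSF reconstruction above: with a 64-bit header TSF of
 * 0x0000000500000000 and a per-pulse tsf_timestamp of 0x1234, tsf64 becomes
 * 0x0000000500001234 - the upper 32 bits come from the event header and the
 * lower 32 bits from the pulse itself.
 */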
4161 
4162 static int ath10k_dfs_fft_report(struct ath10k *ar,
4163 				 struct wmi_phyerr_ev_arg *phyerr,
4164 				 const struct phyerr_fft_report *fftr,
4165 				 u64 tsf)
4166 {
4167 	u32 reg0, reg1;
4168 	u8 rssi, peak_mag;
4169 
4170 	reg0 = __le32_to_cpu(fftr->reg0);
4171 	reg1 = __le32_to_cpu(fftr->reg1);
4172 	rssi = phyerr->rssi_combined;
4173 
4174 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4175 		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4176 		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4177 		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4178 		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4179 		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4180 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4181 		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4182 		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4183 		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4184 		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4185 		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4186 
4187 	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4188 
4189 	/* false event detection */
4190 	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4191 	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4192 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4193 		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4194 		return -EINVAL;
4195 	}
4196 
4197 	return 0;
4198 }
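/* Note: the check above is a heuristic false-detection filter - a pulse whose
 * combined RSSI equals DFS_RSSI_POSSIBLY_FALSE and whose FFT peak magnitude is
 * below twice DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE is counted in
 * pulses_discarded, and the caller then aborts the rest of the TLV buffer.
 */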
4199 
4200 void ath10k_wmi_event_dfs(struct ath10k *ar,
4201 			  struct wmi_phyerr_ev_arg *phyerr,
4202 			  u64 tsf)
4203 {
4204 	int buf_len, tlv_len, res, i = 0;
4205 	const struct phyerr_tlv *tlv;
4206 	const struct phyerr_radar_report *rr;
4207 	const struct phyerr_fft_report *fftr;
4208 	const u8 *tlv_buf;
4209 
4210 	buf_len = phyerr->buf_len;
4211 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4212 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4213 		   phyerr->phy_err_code, phyerr->rssi_combined,
4214 		   phyerr->tsf_timestamp, tsf, buf_len);
4215 
4216 	/* Skip event if DFS disabled */
4217 	if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4218 		return;
4219 
4220 	ATH10K_DFS_STAT_INC(ar, pulses_total);
4221 
4222 	while (i < buf_len) {
4223 		if (i + sizeof(*tlv) > buf_len) {
4224 			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4225 				    i);
4226 			return;
4227 		}
4228 
4229 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4230 		tlv_len = __le16_to_cpu(tlv->len);
4231 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4232 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4233 			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4234 			   tlv_len, tlv->tag, tlv->sig);
4235 
4236 		switch (tlv->tag) {
4237 		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4238 			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4239 				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4240 					    i);
4241 				return;
4242 			}
4243 
4244 			rr = (struct phyerr_radar_report *)tlv_buf;
4245 			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4246 			break;
4247 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4248 			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4249 				ath10k_warn(ar, "too short fft report (%d)\n",
4250 					    i);
4251 				return;
4252 			}
4253 
4254 			fftr = (struct phyerr_fft_report *)tlv_buf;
4255 			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4256 			if (res)
4257 				return;
4258 			break;
4259 		}
4260 
4261 		i += sizeof(*tlv) + tlv_len;
4262 	}
4263 }
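/* Note on the loop above: the phyerr payload is a sequence of TLVs, each with
 * a small phyerr_tlv header (16-bit len plus tag and sig fields) followed by
 * tlv_len bytes of payload, so the cursor advances by sizeof(*tlv) + tlv_len
 * per iteration and every access is bounds-checked against buf_len first.
 */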
4264 
4265 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4266 				    struct wmi_phyerr_ev_arg *phyerr,
4267 				    u64 tsf)
4268 {
4269 	int buf_len, tlv_len, res, i = 0;
4270 	struct phyerr_tlv *tlv;
4271 	const void *tlv_buf;
4272 	const struct phyerr_fft_report *fftr;
4273 	size_t fftr_len;
4274 
4275 	buf_len = phyerr->buf_len;
4276 
4277 	while (i < buf_len) {
4278 		if (i + sizeof(*tlv) > buf_len) {
4279 			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4280 				    i);
4281 			return;
4282 		}
4283 
4284 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4285 		tlv_len = __le16_to_cpu(tlv->len);
4286 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4287 
4288 		if (i + sizeof(*tlv) + tlv_len > buf_len) {
4289 			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4290 				    i);
4291 			return;
4292 		}
4293 
4294 		switch (tlv->tag) {
4295 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4296 			if (sizeof(*fftr) > tlv_len) {
4297 				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4298 					    i);
4299 				return;
4300 			}
4301 
4302 			fftr_len = tlv_len - sizeof(*fftr);
4303 			fftr = tlv_buf;
4304 			res = ath10k_spectral_process_fft(ar, phyerr,
4305 							  fftr, fftr_len,
4306 							  tsf);
4307 			if (res < 0) {
4308 				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4309 					   res);
4310 				return;
4311 			}
4312 			break;
4313 		}
4314 
4315 		i += sizeof(*tlv) + tlv_len;
4316 	}
4317 }
4318 
4319 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4320 					    struct sk_buff *skb,
4321 					    struct wmi_phyerr_hdr_arg *arg)
4322 {
4323 	struct wmi_phyerr_event *ev = (void *)skb->data;
4324 
4325 	if (skb->len < sizeof(*ev))
4326 		return -EPROTO;
4327 
4328 	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4329 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4330 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4331 	arg->buf_len = skb->len - sizeof(*ev);
4332 	arg->phyerrs = ev->phyerrs;
4333 
4334 	return 0;
4335 }
4336 
4337 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4338 						 struct sk_buff *skb,
4339 						 struct wmi_phyerr_hdr_arg *arg)
4340 {
4341 	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4342 
4343 	if (skb->len < sizeof(*ev))
4344 		return -EPROTO;
4345 
4346 	/* 10.4 firmware always reports only one phyerr */
4347 	arg->num_phyerrs = 1;
4348 
4349 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4350 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4351 	arg->buf_len = skb->len;
4352 	arg->phyerrs = skb->data;
4353 
4354 	return 0;
4355 }
4356 
4357 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4358 				 const void *phyerr_buf,
4359 				 int left_len,
4360 				 struct wmi_phyerr_ev_arg *arg)
4361 {
4362 	const struct wmi_phyerr *phyerr = phyerr_buf;
4363 	int i;
4364 
4365 	if (left_len < sizeof(*phyerr)) {
4366 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4367 			    left_len, sizeof(*phyerr));
4368 		return -EINVAL;
4369 	}
4370 
4371 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4372 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4373 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4374 	arg->rssi_combined = phyerr->rssi_combined;
4375 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4376 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4377 	arg->buf = phyerr->buf;
4378 	arg->hdr_len = sizeof(*phyerr);
4379 
4380 	for (i = 0; i < 4; i++)
4381 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4382 
4383 	switch (phyerr->phy_err_code) {
4384 	case PHY_ERROR_GEN_SPECTRAL_SCAN:
4385 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4386 		break;
4387 	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4388 		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4389 		break;
4390 	case PHY_ERROR_GEN_RADAR:
4391 		arg->phy_err_code = PHY_ERROR_RADAR;
4392 		break;
4393 	default:
4394 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4395 		break;
4396 	}
4397 
4398 	return 0;
4399 }
4400 
4401 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4402 					     const void *phyerr_buf,
4403 					     int left_len,
4404 					     struct wmi_phyerr_ev_arg *arg)
4405 {
4406 	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4407 	u32 phy_err_mask;
4408 	int i;
4409 
4410 	if (left_len < sizeof(*phyerr)) {
4411 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4412 			    left_len, sizeof(*phyerr));
4413 		return -EINVAL;
4414 	}
4415 
4416 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4417 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4418 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4419 	arg->rssi_combined = phyerr->rssi_combined;
4420 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4421 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4422 	arg->buf = phyerr->buf;
4423 	arg->hdr_len = sizeof(*phyerr);
4424 
4425 	for (i = 0; i < 4; i++)
4426 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4427 
4428 	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4429 
4430 	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4431 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4432 	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4433 		arg->phy_err_code = PHY_ERROR_RADAR;
4434 	else
4435 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4436 
4437 	return 0;
4438 }
4439 
4440 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4441 {
4442 	struct wmi_phyerr_hdr_arg hdr_arg = {};
4443 	struct wmi_phyerr_ev_arg phyerr_arg = {};
4444 	const void *phyerr;
4445 	u32 count, i, buf_len, phy_err_code;
4446 	u64 tsf;
4447 	int left_len, ret;
4448 
4449 	ATH10K_DFS_STAT_INC(ar, phy_errors);
4450 
4451 	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4452 	if (ret) {
4453 		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4454 		return;
4455 	}
4456 
4457 	/* Check number of included events */
4458 	count = hdr_arg.num_phyerrs;
4459 
4460 	left_len = hdr_arg.buf_len;
4461 
4462 	tsf = hdr_arg.tsf_u32;
4463 	tsf <<= 32;
4464 	tsf |= hdr_arg.tsf_l32;
4465 
4466 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4467 		   "wmi event phyerr count %d tsf64 0x%llX\n",
4468 		   count, tsf);
4469 
4470 	phyerr = hdr_arg.phyerrs;
4471 	for (i = 0; i < count; i++) {
4472 		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4473 		if (ret) {
4474 			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4475 				    i);
4476 			return;
4477 		}
4478 
4479 		left_len -= phyerr_arg.hdr_len;
4480 		buf_len = phyerr_arg.buf_len;
4481 		phy_err_code = phyerr_arg.phy_err_code;
4482 
4483 		if (left_len < buf_len) {
4484 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4485 			return;
4486 		}
4487 
4488 		left_len -= buf_len;
4489 
4490 		switch (phy_err_code) {
4491 		case PHY_ERROR_RADAR:
4492 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4493 			break;
4494 		case PHY_ERROR_SPECTRAL_SCAN:
4495 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4496 			break;
4497 		case PHY_ERROR_FALSE_RADAR_EXT:
4498 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4499 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4500 			break;
4501 		default:
4502 			break;
4503 		}
4504 
4505 		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4506 	}
4507 }
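/* Note on the dispatch above: the 64-bit TSF is rebuilt from the header as
 * (tsf_u32 << 32) | tsf_l32, and each sub-event is parsed with
 * ath10k_wmi_pull_phyerr() while left_len tracks the remaining bytes so that
 * a malformed buf_len cannot walk past the end of the event.
 */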
4508 
4509 static int
4510 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4511 				      struct wmi_dfs_status_ev_arg *arg)
4512 {
4513 	struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4514 
4515 	if (skb->len < sizeof(*ev))
4516 		return -EPROTO;
4517 
4518 	arg->status = ev->status;
4519 
4520 	return 0;
4521 }
4522 
4523 static void
4524 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4525 {
4526 	struct wmi_dfs_status_ev_arg status_arg = {};
4527 	int ret;
4528 
4529 	ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4530 
4531 	if (ret) {
4532 		ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4533 		return;
4534 	}
4535 
4536 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4537 		   "dfs status event received from fw: %d\n",
4538 		   status_arg.status);
4539 
4540 	/* Even in case of radar detection failure we follow the same
4541 	 * behaviour as if radar were detected, i.e. switch to a different
4542 	 * channel.
4543 	 */
4544 	if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4545 	    status_arg.status == WMI_RADAR_DETECTION_FAIL)
4546 		ath10k_radar_detected(ar);
4547 	complete(&ar->wmi.radar_confirm);
4548 }
4549 
4550 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4551 {
4552 	struct wmi_roam_ev_arg arg = {};
4553 	int ret;
4554 	u32 vdev_id;
4555 	u32 reason;
4556 	s32 rssi;
4557 
4558 	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4559 	if (ret) {
4560 		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4561 		return;
4562 	}
4563 
4564 	vdev_id = __le32_to_cpu(arg.vdev_id);
4565 	reason = __le32_to_cpu(arg.reason);
4566 	rssi = __le32_to_cpu(arg.rssi);
4567 	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4568 
4569 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4570 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4571 		   vdev_id, reason, rssi);
4572 
4573 	if (reason >= WMI_ROAM_REASON_MAX)
4574 		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4575 			    reason, vdev_id);
4576 
4577 	switch (reason) {
4578 	case WMI_ROAM_REASON_BEACON_MISS:
4579 		ath10k_mac_handle_beacon_miss(ar, vdev_id);
4580 		break;
4581 	case WMI_ROAM_REASON_BETTER_AP:
4582 	case WMI_ROAM_REASON_LOW_RSSI:
4583 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4584 	case WMI_ROAM_REASON_HO_FAILED:
4585 		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4586 			    reason, vdev_id);
4587 		break;
4588 	}
4589 }
4590 
4591 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4592 {
4593 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4594 }
4595 
4596 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4597 {
4598 	char buf[101], c;
4599 	int i;
4600 
4601 	for (i = 0; i < sizeof(buf) - 1; i++) {
4602 		if (i >= skb->len)
4603 			break;
4604 
4605 		c = skb->data[i];
4606 
4607 		if (c == '\0')
4608 			break;
4609 
4610 		if (isascii(c) && isprint(c))
4611 			buf[i] = c;
4612 		else
4613 			buf[i] = '.';
4614 	}
4615 
4616 	if (i == sizeof(buf) - 1)
4617 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4618 
4619 	/* for some reason the debug prints end with \n, remove that */
4620 	if (skb->data[i - 1] == '\n')
4621 		i--;
4622 
4623 	/* the last byte is always reserved for the null character */
4624 	buf[i] = '\0';
4625 
4626 	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4627 }
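/* Note: the handler above copies at most 100 characters of the firmware debug
 * string, replaces non-printable bytes with '.', drops a trailing newline and
 * always NUL-terminates buf before printing it.
 */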
4628 
4629 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4630 {
4631 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4632 }
4633 
4634 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4635 {
4636 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4637 }
4638 
4639 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4640 					     struct sk_buff *skb)
4641 {
4642 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4643 }
4644 
4645 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4646 					     struct sk_buff *skb)
4647 {
4648 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4649 }
4650 
4651 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4652 {
4653 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4654 }
4655 
4656 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4657 {
4658 	struct wmi_wow_ev_arg ev = {};
4659 	int ret;
4660 
4661 	complete(&ar->wow.wakeup_completed);
4662 
4663 	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4664 	if (ret) {
4665 		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4666 		return;
4667 	}
4668 
4669 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4670 		   wow_reason(ev.wake_reason));
4671 }
4672 
4673 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4674 {
4675 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4676 }
4677 
4678 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4679 				     struct wmi_pdev_tpc_config_event *ev,
4680 				     u32 rate_idx, u32 num_chains,
4681 				     u32 rate_code, u8 type)
4682 {
4683 	u8 tpc, num_streams, preamble, ch, stm_idx;
4684 
4685 	num_streams = ATH10K_HW_NSS(rate_code);
4686 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4687 	ch = num_chains - 1;
4688 
4689 	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4690 
4691 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4692 		goto out;
4693 
4694 	if (preamble == WMI_RATE_PREAMBLE_CCK)
4695 		goto out;
4696 
4697 	stm_idx = num_streams - 1;
4698 	if (num_chains <= num_streams)
4699 		goto out;
4700 
4701 	switch (type) {
4702 	case WMI_TPC_TABLE_TYPE_STBC:
4703 		tpc = min_t(u8, tpc,
4704 			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4705 		break;
4706 	case WMI_TPC_TABLE_TYPE_TXBF:
4707 		tpc = min_t(u8, tpc,
4708 			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4709 		break;
4710 	case WMI_TPC_TABLE_TYPE_CDD:
4711 		tpc = min_t(u8, tpc,
4712 			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4713 		break;
4714 	default:
4715 		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4716 		tpc = 0;
4717 		break;
4718 	}
4719 
4720 out:
4721 	return tpc;
4722 }
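/* Note on the clamping above: the reported TPC is the per-rate power limited
 * by the regulatory maximum for the chain count, and when the device has more
 * than one tx chain, the rate is not CCK and the chain count exceeds the
 * stream count, it is further limited by the STBC/TXBF/CDD array-gain table
 * that matches the requested table type.
 */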
4723 
4724 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4725 					  struct wmi_pdev_tpc_config_event *ev,
4726 					  struct ath10k_tpc_stats *tpc_stats,
4727 					  u8 *rate_code, u16 *pream_table, u8 type)
4728 {
4729 	u32 i, j, pream_idx, flags;
4730 	u8 tpc[WMI_TPC_TX_N_CHAIN];
4731 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4732 	char buff[WMI_TPC_BUF_SIZE];
4733 
4734 	flags = __le32_to_cpu(ev->flags);
4735 
4736 	switch (type) {
4737 	case WMI_TPC_TABLE_TYPE_CDD:
4738 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4739 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4740 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4741 			return;
4742 		}
4743 		break;
4744 	case WMI_TPC_TABLE_TYPE_STBC:
4745 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4746 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4747 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4748 			return;
4749 		}
4750 		break;
4751 	case WMI_TPC_TABLE_TYPE_TXBF:
4752 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4753 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4754 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4755 			return;
4756 		}
4757 		break;
4758 	default:
4759 		ath10k_dbg(ar, ATH10K_DBG_WMI,
4760 			   "invalid table type in wmi tpc event: %d\n", type);
4761 		return;
4762 	}
4763 
4764 	pream_idx = 0;
4765 	for (i = 0; i < tpc_stats->rate_max; i++) {
4766 		memset(tpc_value, 0, sizeof(tpc_value));
4767 		memset(buff, 0, sizeof(buff));
4768 		if (i == pream_table[pream_idx])
4769 			pream_idx++;
4770 
4771 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4772 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4773 							    rate_code[i],
4774 							    type);
4775 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4776 			strlcat(tpc_value, buff, sizeof(tpc_value));
4777 		}
4778 		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4779 		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4780 		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4781 		       tpc_value, sizeof(tpc_value));
4782 	}
4783 }
4784 
4785 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4786 					 u32 num_tx_chain)
4787 {
4788 	u32 i, j, pream_idx;
4789 	u8 rate_idx;
4790 
4791 	/* Create the rate code table based on the chains supported */
4792 	rate_idx = 0;
4793 	pream_idx = 0;
4794 
4795 	/* Fill CCK rate code */
4796 	for (i = 0; i < 4; i++) {
4797 		rate_code[rate_idx] =
4798 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4799 		rate_idx++;
4800 	}
4801 	pream_table[pream_idx] = rate_idx;
4802 	pream_idx++;
4803 
4804 	/* Fill OFDM rate code */
4805 	for (i = 0; i < 8; i++) {
4806 		rate_code[rate_idx] =
4807 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4808 		rate_idx++;
4809 	}
4810 	pream_table[pream_idx] = rate_idx;
4811 	pream_idx++;
4812 
4813 	/* Fill HT20 rate code */
4814 	for (i = 0; i < num_tx_chain; i++) {
4815 		for (j = 0; j < 8; j++) {
4816 			rate_code[rate_idx] =
4817 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4818 			rate_idx++;
4819 		}
4820 	}
4821 	pream_table[pream_idx] = rate_idx;
4822 	pream_idx++;
4823 
4824 	/* Fill HT40 rate code */
4825 	for (i = 0; i < num_tx_chain; i++) {
4826 		for (j = 0; j < 8; j++) {
4827 			rate_code[rate_idx] =
4828 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4829 			rate_idx++;
4830 		}
4831 	}
4832 	pream_table[pream_idx] = rate_idx;
4833 	pream_idx++;
4834 
4835 	/* Fill VHT20 rate code */
4836 	for (i = 0; i < num_tx_chain; i++) {
4837 		for (j = 0; j < 10; j++) {
4838 			rate_code[rate_idx] =
4839 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4840 			rate_idx++;
4841 		}
4842 	}
4843 	pream_table[pream_idx] = rate_idx;
4844 	pream_idx++;
4845 
4846 	/* Fill VHT40 rate code */
4847 	for (i = 0; i < num_tx_chain; i++) {
4848 		for (j = 0; j < 10; j++) {
4849 			rate_code[rate_idx] =
4850 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4851 			rate_idx++;
4852 		}
4853 	}
4854 	pream_table[pream_idx] = rate_idx;
4855 	pream_idx++;
4856 
4857 	/* Fill VHT80 rate code */
4858 	for (i = 0; i < num_tx_chain; i++) {
4859 		for (j = 0; j < 10; j++) {
4860 			rate_code[rate_idx] =
4861 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4862 			rate_idx++;
4863 		}
4864 	}
4865 	pream_table[pream_idx] = rate_idx;
4866 	pream_idx++;
4867 
4868 	rate_code[rate_idx++] =
4869 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4870 	rate_code[rate_idx++] =
4871 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4872 	rate_code[rate_idx++] =
4873 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4874 	rate_code[rate_idx++] =
4875 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4876 	rate_code[rate_idx++] =
4877 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4878 
4879 	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4880 }
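/* Worked example for the table builder above with num_tx_chain == 2:
 * rate_code[] is filled with 4 CCK + 8 OFDM + 16 HT20 + 16 HT40 + 20 VHT20 +
 * 20 VHT40 + 20 VHT80 + 5 trailing entries = 109 rate codes, and
 * pream_table[] records the first index after each group
 * (4, 12, 28, 44, 64, 84, 104) before being terminated with
 * ATH10K_TPC_PREAM_TABLE_END.
 */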
4881 
4882 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4883 {
4884 	u32 num_tx_chain, rate_max;
4885 	u8 rate_code[WMI_TPC_RATE_MAX];
4886 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4887 	struct wmi_pdev_tpc_config_event *ev;
4888 	struct ath10k_tpc_stats *tpc_stats;
4889 
4890 	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4891 
4892 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4893 
4894 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4895 		ath10k_warn(ar, "number of tx chains (%d) exceeds the TPC configured tx chains (%d)\n",
4896 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
4897 		return;
4898 	}
4899 
4900 	rate_max = __le32_to_cpu(ev->rate_max);
4901 	if (rate_max > WMI_TPC_RATE_MAX) {
4902 		ath10k_warn(ar, "number of rates (%d) exceeds the TPC configured rates (%d)\n",
4903 			    rate_max, WMI_TPC_RATE_MAX);
4904 		rate_max = WMI_TPC_RATE_MAX;
4905 	}
4906 
4907 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4908 	if (!tpc_stats)
4909 		return;
4910 
4911 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4912 					    num_tx_chain);
4913 
4914 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4915 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4916 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4917 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4918 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4919 	tpc_stats->twice_antenna_reduction =
4920 		__le32_to_cpu(ev->twice_antenna_reduction);
4921 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4922 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4923 	tpc_stats->num_tx_chain = num_tx_chain;
4924 	tpc_stats->rate_max = rate_max;
4925 
4926 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4927 				      rate_code, pream_table,
4928 				      WMI_TPC_TABLE_TYPE_CDD);
4929 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4930 				      rate_code, pream_table,
4931 				      WMI_TPC_TABLE_TYPE_STBC);
4932 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4933 				      rate_code, pream_table,
4934 				      WMI_TPC_TABLE_TYPE_TXBF);
4935 
4936 	ath10k_debug_tpc_stats_process(ar, tpc_stats);
4937 
4938 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4939 		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
4940 		   __le32_to_cpu(ev->chan_freq),
4941 		   __le32_to_cpu(ev->phy_mode),
4942 		   __le32_to_cpu(ev->ctl),
4943 		   __le32_to_cpu(ev->reg_domain),
4944 		   a_sle32_to_cpu(ev->twice_antenna_gain),
4945 		   __le32_to_cpu(ev->twice_antenna_reduction),
4946 		   __le32_to_cpu(ev->power_limit),
4947 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
4948 		   __le32_to_cpu(ev->num_tx_chain),
4949 		   __le32_to_cpu(ev->rate_max));
4950 }
4951 
4952 static u8
4953 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4954 			      struct wmi_pdev_tpc_final_table_event *ev,
4955 			      u32 rate_idx, u32 num_chains,
4956 			      u32 rate_code, u8 type, u32 pream_idx)
4957 {
4958 	u8 tpc, num_streams, preamble, ch, stm_idx;
4959 	s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4960 	int pream;
4961 
4962 	num_streams = ATH10K_HW_NSS(rate_code);
4963 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4964 	ch = num_chains - 1;
4965 	stm_idx = num_streams - 1;
4966 	pream = -1;
4967 
4968 	if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4969 		switch (pream_idx) {
4970 		case WMI_TPC_PREAM_2GHZ_CCK:
4971 			pream = 0;
4972 			break;
4973 		case WMI_TPC_PREAM_2GHZ_OFDM:
4974 			pream = 1;
4975 			break;
4976 		case WMI_TPC_PREAM_2GHZ_HT20:
4977 		case WMI_TPC_PREAM_2GHZ_VHT20:
4978 			pream = 2;
4979 			break;
4980 		case WMI_TPC_PREAM_2GHZ_HT40:
4981 		case WMI_TPC_PREAM_2GHZ_VHT40:
4982 			pream = 3;
4983 			break;
4984 		case WMI_TPC_PREAM_2GHZ_VHT80:
4985 			pream = 4;
4986 			break;
4987 		default:
4988 			pream = -1;
4989 			break;
4990 		}
4991 	}
4992 
4993 	if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4994 		switch (pream_idx) {
4995 		case WMI_TPC_PREAM_5GHZ_OFDM:
4996 			pream = 0;
4997 			break;
4998 		case WMI_TPC_PREAM_5GHZ_HT20:
4999 		case WMI_TPC_PREAM_5GHZ_VHT20:
5000 			pream = 1;
5001 			break;
5002 		case WMI_TPC_PREAM_5GHZ_HT40:
5003 		case WMI_TPC_PREAM_5GHZ_VHT40:
5004 			pream = 2;
5005 			break;
5006 		case WMI_TPC_PREAM_5GHZ_VHT80:
5007 			pream = 3;
5008 			break;
5009 		case WMI_TPC_PREAM_5GHZ_HTCUP:
5010 			pream = 4;
5011 			break;
5012 		default:
5013 			pream = -1;
5014 			break;
5015 		}
5016 	}
5017 
5018 	if (pream == -1) {
5019 		ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
5020 			    pream_idx, __le32_to_cpu(ev->chan_freq));
5021 		tpc = 0;
5022 		goto out;
5023 	}
5024 
5025 	if (pream == 4)
5026 		tpc = min_t(u8, ev->rates_array[rate_idx],
5027 			    ev->max_reg_allow_pow[ch]);
5028 	else
5029 		tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
5030 				      ev->max_reg_allow_pow[ch]),
5031 			    ev->ctl_power_table[0][pream][stm_idx]);
5032 
5033 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
5034 		goto out;
5035 
5036 	if (preamble == WMI_RATE_PREAMBLE_CCK)
5037 		goto out;
5038 
5039 	if (num_chains <= num_streams)
5040 		goto out;
5041 
5042 	switch (type) {
5043 	case WMI_TPC_TABLE_TYPE_STBC:
5044 		pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
5045 		if (pream == 4)
5046 			tpc = min_t(u8, tpc, pow_agstbc);
5047 		else
5048 			tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
5049 				    ev->ctl_power_table[0][pream][stm_idx]);
5050 		break;
5051 	case WMI_TPC_TABLE_TYPE_TXBF:
5052 		pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
5053 		if (pream == 4)
5054 			tpc = min_t(u8, tpc, pow_agtxbf);
5055 		else
5056 			tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
5057 				    ev->ctl_power_table[1][pream][stm_idx]);
5058 		break;
5059 	case WMI_TPC_TABLE_TYPE_CDD:
5060 		pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
5061 		if (pream == 4)
5062 			tpc = min_t(u8, tpc, pow_agcdd);
5063 		else
5064 			tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
5065 				    ev->ctl_power_table[0][pream][stm_idx]);
5066 		break;
5067 	default:
5068 		ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5069 		tpc = 0;
5070 		break;
5071 	}
5072 
5073 out:
5074 	return tpc;
5075 }
5076 
5077 static void
5078 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5079 				       struct wmi_pdev_tpc_final_table_event *ev,
5080 				       struct ath10k_tpc_stats_final *tpc_stats,
5081 				       u8 *rate_code, u16 *pream_table, u8 type)
5082 {
5083 	u32 i, j, pream_idx, flags;
5084 	u8 tpc[WMI_TPC_TX_N_CHAIN];
5085 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5086 	char buff[WMI_TPC_BUF_SIZE];
5087 
5088 	flags = __le32_to_cpu(ev->flags);
5089 
5090 	switch (type) {
5091 	case WMI_TPC_TABLE_TYPE_CDD:
5092 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5093 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5094 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5095 			return;
5096 		}
5097 		break;
5098 	case WMI_TPC_TABLE_TYPE_STBC:
5099 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5100 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5101 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5102 			return;
5103 		}
5104 		break;
5105 	case WMI_TPC_TABLE_TYPE_TXBF:
5106 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5107 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5108 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5109 			return;
5110 		}
5111 		break;
5112 	default:
5113 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5114 			   "invalid table type in wmi tpc event: %d\n", type);
5115 		return;
5116 	}
5117 
5118 	pream_idx = 0;
5119 	for (i = 0; i < tpc_stats->rate_max; i++) {
5120 		memset(tpc_value, 0, sizeof(tpc_value));
5121 		memset(buff, 0, sizeof(buff));
5122 		if (i == pream_table[pream_idx])
5123 			pream_idx++;
5124 
5125 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5126 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5127 							       rate_code[i],
5128 							       type, pream_idx);
5129 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5130 			strlcat(tpc_value, buff, sizeof(tpc_value));
5131 		}
5132 		tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5133 		tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5134 		memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5135 		       tpc_value, sizeof(tpc_value));
5136 	}
5137 }
5138 
5139 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5140 {
5141 	u32 num_tx_chain, rate_max;
5142 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5143 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5144 	struct wmi_pdev_tpc_final_table_event *ev;
5145 	struct ath10k_tpc_stats_final *tpc_stats;
5146 
5147 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5148 
5149 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5150 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5151 		ath10k_warn(ar, "number of tx chains (%d) exceeds the TPC final configured tx chains (%d)\n",
5152 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
5153 		return;
5154 	}
5155 
5156 	rate_max = __le32_to_cpu(ev->rate_max);
5157 	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5158 		ath10k_warn(ar, "number of rates (%d) exceeds the TPC final configured rates (%d)\n",
5159 			    rate_max, WMI_TPC_FINAL_RATE_MAX);
5160 		rate_max = WMI_TPC_FINAL_RATE_MAX;
5161 	}
5162 
5163 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5164 	if (!tpc_stats)
5165 		return;
5166 
5167 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5168 					    num_tx_chain);
5169 
5170 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5171 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5172 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5173 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5174 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5175 	tpc_stats->twice_antenna_reduction =
5176 		__le32_to_cpu(ev->twice_antenna_reduction);
5177 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5178 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5179 	tpc_stats->num_tx_chain = num_tx_chain;
5180 	tpc_stats->rate_max = rate_max;
5181 
5182 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5183 					       rate_code, pream_table,
5184 					       WMI_TPC_TABLE_TYPE_CDD);
5185 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5186 					       rate_code, pream_table,
5187 					       WMI_TPC_TABLE_TYPE_STBC);
5188 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5189 					       rate_code, pream_table,
5190 					       WMI_TPC_TABLE_TYPE_TXBF);
5191 
5192 	ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5193 
5194 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5195 		   "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
5196 		   __le32_to_cpu(ev->chan_freq),
5197 		   __le32_to_cpu(ev->phy_mode),
5198 		   __le32_to_cpu(ev->ctl),
5199 		   __le32_to_cpu(ev->reg_domain),
5200 		   a_sle32_to_cpu(ev->twice_antenna_gain),
5201 		   __le32_to_cpu(ev->twice_antenna_reduction),
5202 		   __le32_to_cpu(ev->power_limit),
5203 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
5204 		   __le32_to_cpu(ev->num_tx_chain),
5205 		   __le32_to_cpu(ev->rate_max));
5206 }
5207 
5208 static void
5209 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5210 {
5211 	struct wmi_tdls_peer_event *ev;
5212 	struct ath10k_peer *peer;
5213 	struct ath10k_vif *arvif;
5214 	int vdev_id;
5215 	int peer_status;
5216 	int peer_reason;
5217 	u8 reason;
5218 
5219 	if (skb->len < sizeof(*ev)) {
5220 		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5221 			   skb->len);
5222 		return;
5223 	}
5224 
5225 	ev = (struct wmi_tdls_peer_event *)skb->data;
5226 	vdev_id = __le32_to_cpu(ev->vdev_id);
5227 	peer_status = __le32_to_cpu(ev->peer_status);
5228 	peer_reason = __le32_to_cpu(ev->peer_reason);
5229 
5230 	spin_lock_bh(&ar->data_lock);
5231 	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5232 	spin_unlock_bh(&ar->data_lock);
5233 
5234 	if (!peer) {
5235 		ath10k_warn(ar, "failed to find peer entry for %pM\n",
5236 			    ev->peer_macaddr.addr);
5237 		return;
5238 	}
5239 
5240 	switch (peer_status) {
5241 	case WMI_TDLS_SHOULD_TEARDOWN:
5242 		switch (peer_reason) {
5243 		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5244 		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5245 		case WMI_TDLS_TEARDOWN_REASON_RSSI:
5246 			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5247 			break;
5248 		default:
5249 			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5250 			break;
5251 		}
5252 
5253 		arvif = ath10k_get_arvif(ar, vdev_id);
5254 		if (!arvif) {
5255 			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5256 				    vdev_id);
5257 			return;
5258 		}
5259 
5260 		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5261 					    NL80211_TDLS_TEARDOWN, reason,
5262 					    GFP_ATOMIC);
5263 
5264 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5265 			   "received tdls teardown event for peer %pM reason %u\n",
5266 			   ev->peer_macaddr.addr, peer_reason);
5267 		break;
5268 	default:
5269 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5270 			   "received unknown tdls peer event %u\n",
5271 			   peer_status);
5272 		break;
5273 	}
5274 }
5275 
5276 static void
5277 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5278 {
5279 	struct wmi_peer_sta_ps_state_chg_event *ev;
5280 	struct ieee80211_sta *sta;
5281 	struct ath10k_sta *arsta;
5282 	u8 peer_addr[ETH_ALEN];
5283 
5284 	lockdep_assert_held(&ar->data_lock);
5285 
5286 	ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5287 	ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5288 
5289 	rcu_read_lock();
5290 
5291 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5292 
5293 	if (!sta) {
5294 		ath10k_warn(ar, "failed to find station entry %pM\n",
5295 			    peer_addr);
5296 		goto exit;
5297 	}
5298 
5299 	arsta = (struct ath10k_sta *)sta->drv_priv;
5300 	arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5301 
5302 exit:
5303 	rcu_read_unlock();
5304 }
5305 
5306 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5307 {
5308 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5309 }
5310 
5311 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5312 {
5313 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5314 }
5315 
5316 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5317 {
5318 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5319 }
5320 
5321 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5322 {
5323 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5324 }
5325 
5326 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5327 {
5328 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5329 }
5330 
5331 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5332 						struct sk_buff *skb)
5333 {
5334 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5335 }
5336 
5337 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5338 {
5339 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5340 }
5341 
5342 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5343 {
5344 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5345 }
5346 
5347 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5348 {
5349 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5350 }
5351 
5352 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5353 				  u32 num_units, u32 unit_len)
5354 {
5355 	dma_addr_t paddr;
5356 	u32 pool_size;
5357 	int idx = ar->wmi.num_mem_chunks;
5358 	void *vaddr;
5359 
5360 	pool_size = num_units * round_up(unit_len, 4);
5361 	vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5362 
5363 	if (!vaddr)
5364 		return -ENOMEM;
5365 
5366 	ar->wmi.mem_chunks[idx].vaddr = vaddr;
5367 	ar->wmi.mem_chunks[idx].paddr = paddr;
5368 	ar->wmi.mem_chunks[idx].len = pool_size;
5369 	ar->wmi.mem_chunks[idx].req_id = req_id;
5370 	ar->wmi.num_mem_chunks++;
5371 
5372 	return num_units;
5373 }
5374 
5375 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5376 				     u32 num_units, u32 unit_len)
5377 {
5378 	int ret;
5379 
5380 	while (num_units) {
5381 		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5382 		if (ret < 0)
5383 			return ret;
5384 
5385 		num_units -= ret;
5386 	}
5387 
5388 	return 0;
5389 }
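/* Note on the allocation helpers above: ath10k_wmi_alloc_chunk() grabs the
 * whole request as one coherent DMA pool of num_units * round_up(unit_len, 4)
 * bytes and records it in ar->wmi.mem_chunks[], so on success the while loop
 * in ath10k_wmi_alloc_host_mem() finishes after a single iteration.
 */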
5390 
5391 static bool
5392 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5393 				 const struct wlan_host_mem_req **mem_reqs,
5394 				 u32 num_mem_reqs)
5395 {
5396 	u32 req_id, num_units, unit_size, num_unit_info;
5397 	u32 pool_size;
5398 	int i, j;
5399 	bool found;
5400 
5401 	if (ar->wmi.num_mem_chunks != num_mem_reqs)
5402 		return false;
5403 
5404 	for (i = 0; i < num_mem_reqs; ++i) {
5405 		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5406 		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5407 		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5408 		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5409 
5410 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5411 			if (ar->num_active_peers)
5412 				num_units = ar->num_active_peers + 1;
5413 			else
5414 				num_units = ar->max_num_peers + 1;
5415 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5416 			num_units = ar->max_num_peers + 1;
5417 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5418 			num_units = ar->max_num_vdevs + 1;
5419 		}
5420 
5421 		found = false;
5422 		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5423 			if (ar->wmi.mem_chunks[j].req_id == req_id) {
5424 				pool_size = num_units * round_up(unit_size, 4);
5425 				if (ar->wmi.mem_chunks[j].len == pool_size) {
5426 					found = true;
5427 					break;
5428 				}
5429 			}
5430 		}
5431 		if (!found)
5432 			return false;
5433 	}
5434 
5435 	return true;
5436 }
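/* Note: as in the allocation path later in the service-ready handler, the
 * num_units advertised by firmware may be a placeholder - the
 * NUM_UNITS_IS_NUM_ACTIVE_PEERS, NUM_UNITS_IS_NUM_PEERS and
 * NUM_UNITS_IS_NUM_VDEVS flags tell the host to substitute its own
 * active-peer, max-peer or max-vdev counts (plus one) before comparing the
 * resulting pool size with the already allocated chunks.
 */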
5437 
5438 static int
5439 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5440 				   struct wmi_svc_rdy_ev_arg *arg)
5441 {
5442 	struct wmi_service_ready_event *ev;
5443 	size_t i, n;
5444 
5445 	if (skb->len < sizeof(*ev))
5446 		return -EPROTO;
5447 
5448 	ev = (void *)skb->data;
5449 	skb_pull(skb, sizeof(*ev));
5450 	arg->min_tx_power = ev->hw_min_tx_power;
5451 	arg->max_tx_power = ev->hw_max_tx_power;
5452 	arg->ht_cap = ev->ht_cap_info;
5453 	arg->vht_cap = ev->vht_cap_info;
5454 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5455 	arg->sw_ver0 = ev->sw_version;
5456 	arg->sw_ver1 = ev->sw_version_1;
5457 	arg->phy_capab = ev->phy_capability;
5458 	arg->num_rf_chains = ev->num_rf_chains;
5459 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5460 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5461 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5462 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5463 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5464 	arg->num_mem_reqs = ev->num_mem_reqs;
5465 	arg->service_map = ev->wmi_service_bitmap;
5466 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5467 
5468 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5469 		  ARRAY_SIZE(arg->mem_reqs));
5470 	for (i = 0; i < n; i++)
5471 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5472 
5473 	if (skb->len <
5474 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5475 		return -EPROTO;
5476 
5477 	return 0;
5478 }
5479 
5480 static int
5481 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5482 				  struct wmi_svc_rdy_ev_arg *arg)
5483 {
5484 	struct wmi_10x_service_ready_event *ev;
5485 	int i, n;
5486 
5487 	if (skb->len < sizeof(*ev))
5488 		return -EPROTO;
5489 
5490 	ev = (void *)skb->data;
5491 	skb_pull(skb, sizeof(*ev));
5492 	arg->min_tx_power = ev->hw_min_tx_power;
5493 	arg->max_tx_power = ev->hw_max_tx_power;
5494 	arg->ht_cap = ev->ht_cap_info;
5495 	arg->vht_cap = ev->vht_cap_info;
5496 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5497 	arg->sw_ver0 = ev->sw_version;
5498 	arg->phy_capab = ev->phy_capability;
5499 	arg->num_rf_chains = ev->num_rf_chains;
5500 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5501 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5502 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5503 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5504 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5505 	arg->num_mem_reqs = ev->num_mem_reqs;
5506 	arg->service_map = ev->wmi_service_bitmap;
5507 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5508 
5509 	/* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
5510 	 * different values. We would need a translation to handle that,
5511 	 * but as we don't currently need anything from sys_cap_info from
5512 	 * WMI interface (only from WMI-TLV) safest it to skip it.
5513 	 */
5514 
5515 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5516 		  ARRAY_SIZE(arg->mem_reqs));
5517 	for (i = 0; i < n; i++)
5518 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5519 
5520 	if (skb->len <
5521 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5522 		return -EPROTO;
5523 
5524 	return 0;
5525 }
5526 
5527 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5528 {
5529 	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5530 	struct sk_buff *skb = ar->svc_rdy_skb;
5531 	struct wmi_svc_rdy_ev_arg arg = {};
5532 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5533 	int ret;
5534 	bool allocated;
5535 
5536 	if (!skb) {
5537 		ath10k_warn(ar, "invalid service ready event skb\n");
5538 		return;
5539 	}
5540 
5541 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5542 	if (ret) {
5543 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5544 		return;
5545 	}
5546 
5547 	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5548 			   arg.service_map_len);
5549 
5550 	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5551 	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5552 	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5553 	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5554 	ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
5555 	ar->fw_version_major =
5556 		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5557 	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5558 	ar->fw_version_release =
5559 		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5560 	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5561 	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5562 	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5563 	ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5564 	ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
5565 	ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
5566 	ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5567 	ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5568 	ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
5569 
5570 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5571 			arg.service_map, arg.service_map_len);
5572 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
5573 		   ar->sys_cap_info);
5574 
5575 	if (ar->num_rf_chains > ar->max_spatial_stream) {
5576 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5577 			    ar->num_rf_chains, ar->max_spatial_stream);
5578 		ar->num_rf_chains = ar->max_spatial_stream;
5579 	}
5580 
5581 	if (!ar->cfg_tx_chainmask) {
5582 		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5583 		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5584 	}
5585 
5586 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
5587 		snprintf(ar->hw->wiphy->fw_version,
5588 			 sizeof(ar->hw->wiphy->fw_version),
5589 			 "%u.%u.%u.%u",
5590 			 ar->fw_version_major,
5591 			 ar->fw_version_minor,
5592 			 ar->fw_version_release,
5593 			 ar->fw_version_build);
5594 	}
5595 
5596 	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5597 	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5598 		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5599 			    num_mem_reqs);
5600 		return;
5601 	}
5602 
5603 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5604 		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5605 			     ar->running_fw->fw_file.fw_features))
5606 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5607 					       ar->max_num_vdevs;
5608 		else
5609 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5610 					       ar->max_num_vdevs;
5611 
5612 		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5613 				    ar->max_num_vdevs;
5614 		ar->num_tids = ar->num_active_peers * 2;
5615 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5616 	}
5617 
5618 	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5619 	 * and WMI_SERVICE_IRAM_TIDS, etc.
5620 	 */
5621 
5622 	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5623 						     num_mem_reqs);
5624 	if (allocated)
5625 		goto skip_mem_alloc;
5626 
5627 	/* Either this event is received during boot time or there is a change
5628 	 * in memory requirement from firmware when compared to last request.
5629 	 * Free any old memory and do a fresh allocation based on the current
5630 	 * memory requirement.
5631 	 */
5632 	ath10k_wmi_free_host_mem(ar);
5633 
5634 	for (i = 0; i < num_mem_reqs; ++i) {
5635 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5636 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5637 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5638 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5639 
5640 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5641 			if (ar->num_active_peers)
5642 				num_units = ar->num_active_peers + 1;
5643 			else
5644 				num_units = ar->max_num_peers + 1;
5645 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5646 			/* The number of units to allocate is the number of
5647 			 * peers, plus 1 extra for the self peer on the
5648 			 * target. This needs to stay in sync, otherwise
5649 			 * host and target can diverge.
5650 			 */
5651 			num_units = ar->max_num_peers + 1;
5652 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5653 			num_units = ar->max_num_vdevs + 1;
5654 		}
5655 
5656 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5657 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5658 			   req_id,
5659 			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
5660 			   num_unit_info,
5661 			   unit_size,
5662 			   num_units);
5663 
5664 		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5665 						unit_size);
5666 		if (ret)
5667 			return;
5668 	}
5669 
5670 skip_mem_alloc:
5671 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5672 		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
5673 		   __le32_to_cpu(arg.min_tx_power),
5674 		   __le32_to_cpu(arg.max_tx_power),
5675 		   __le32_to_cpu(arg.ht_cap),
5676 		   __le32_to_cpu(arg.vht_cap),
5677 		   __le32_to_cpu(arg.vht_supp_mcs),
5678 		   __le32_to_cpu(arg.sw_ver0),
5679 		   __le32_to_cpu(arg.sw_ver1),
5680 		   __le32_to_cpu(arg.fw_build),
5681 		   __le32_to_cpu(arg.phy_capab),
5682 		   __le32_to_cpu(arg.num_rf_chains),
5683 		   __le32_to_cpu(arg.eeprom_rd),
5684 		   __le32_to_cpu(arg.low_2ghz_chan),
5685 		   __le32_to_cpu(arg.high_2ghz_chan),
5686 		   __le32_to_cpu(arg.low_5ghz_chan),
5687 		   __le32_to_cpu(arg.high_5ghz_chan),
5688 		   __le32_to_cpu(arg.num_mem_reqs));
5689 
5690 	dev_kfree_skb(skb);
5691 	ar->svc_rdy_skb = NULL;
5692 	complete(&ar->wmi.service_ready);
5693 }
5694 
5695 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5696 {
5697 	ar->svc_rdy_skb = skb;
5698 	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5699 }
5700 
5701 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5702 				     struct wmi_rdy_ev_arg *arg)
5703 {
5704 	struct wmi_ready_event *ev = (void *)skb->data;
5705 
5706 	if (skb->len < sizeof(*ev))
5707 		return -EPROTO;
5708 
5709 	skb_pull(skb, sizeof(*ev));
5710 	arg->sw_version = ev->sw_version;
5711 	arg->abi_version = ev->abi_version;
5712 	arg->status = ev->status;
5713 	arg->mac_addr = ev->mac_addr.addr;
5714 
5715 	return 0;
5716 }
5717 
5718 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5719 				      struct wmi_roam_ev_arg *arg)
5720 {
5721 	struct wmi_roam_ev *ev = (void *)skb->data;
5722 
5723 	if (skb->len < sizeof(*ev))
5724 		return -EPROTO;
5725 
5726 	skb_pull(skb, sizeof(*ev));
5727 	arg->vdev_id = ev->vdev_id;
5728 	arg->reason = ev->reason;
5729 
5730 	return 0;
5731 }
5732 
5733 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5734 				      struct sk_buff *skb,
5735 				      struct wmi_echo_ev_arg *arg)
5736 {
5737 	struct wmi_echo_event *ev = (void *)skb->data;
5738 
5739 	arg->value = ev->value;
5740 
5741 	return 0;
5742 }
5743 
5744 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5745 {
5746 	struct wmi_rdy_ev_arg arg = {};
5747 	int ret;
5748 
5749 	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5750 	if (ret) {
5751 		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5752 		return ret;
5753 	}
5754 
5755 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5756 		   "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
5757 		   __le32_to_cpu(arg.sw_version),
5758 		   __le32_to_cpu(arg.abi_version),
5759 		   arg.mac_addr,
5760 		   __le32_to_cpu(arg.status));
5761 
5762 	if (is_zero_ether_addr(ar->mac_addr))
5763 		ether_addr_copy(ar->mac_addr, arg.mac_addr);
5764 	complete(&ar->wmi.unified_ready);
5765 	return 0;
5766 }
5767 
5768 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5769 {
5770 	int ret;
5771 	struct wmi_svc_avail_ev_arg arg = {};
5772 
5773 	ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5774 	if (ret) {
5775 		ath10k_warn(ar, "failed to parse service available event: %d\n",
5776 			    ret);
5777 	}
5778 
5779 	/*
5780 	 * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
5781 	 * for the below logic to work.
5782 	 */
5783 	if (arg.service_map_ext_valid)
5784 		ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5785 				       __le32_to_cpu(arg.service_map_ext_len));
5786 }
5787 
5788 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5789 {
5790 	const struct wmi_pdev_temperature_event *ev;
5791 
5792 	ev = (struct wmi_pdev_temperature_event *)skb->data;
5793 	if (WARN_ON(skb->len < sizeof(*ev)))
5794 		return -EPROTO;
5795 
5796 	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5797 	return 0;
5798 }
5799 
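/* Convert the firmware's BSS channel cycle counters into survey info;
 * cycle counts are turned into time by dividing by the channel counter
 * frequency (cc_freq_hz).
 */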
5800 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5801 					       struct sk_buff *skb)
5802 {
5803 	struct wmi_pdev_bss_chan_info_event *ev;
5804 	struct survey_info *survey;
5805 	u64 busy, total, tx, rx, rx_bss;
5806 	u32 freq, noise_floor;
5807 	u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5808 	int idx;
5809 
5810 	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5811 	if (WARN_ON(skb->len < sizeof(*ev)))
5812 		return -EPROTO;
5813 
5814 	freq        = __le32_to_cpu(ev->freq);
5815 	noise_floor = __le32_to_cpu(ev->noise_floor);
5816 	busy        = __le64_to_cpu(ev->cycle_busy);
5817 	total       = __le64_to_cpu(ev->cycle_total);
5818 	tx          = __le64_to_cpu(ev->cycle_tx);
5819 	rx          = __le64_to_cpu(ev->cycle_rx);
5820 	rx_bss      = __le64_to_cpu(ev->cycle_rx_bss);
5821 
5822 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5823 		   "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5824 		   freq, noise_floor, busy, total, tx, rx, rx_bss);
5825 
5826 	spin_lock_bh(&ar->data_lock);
5827 	idx = freq_to_idx(ar, freq);
5828 	if (idx >= ARRAY_SIZE(ar->survey)) {
5829 		ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5830 			    freq, idx);
5831 		goto exit;
5832 	}
5833 
5834 	survey = &ar->survey[idx];
5835 
5836 	survey->noise     = noise_floor;
5837 	survey->time      = div_u64(total, cc_freq_hz);
5838 	survey->time_busy = div_u64(busy, cc_freq_hz);
5839 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
5840 	survey->time_tx   = div_u64(tx, cc_freq_hz);
5841 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
5842 			     SURVEY_INFO_TIME |
5843 			     SURVEY_INFO_TIME_BUSY |
5844 			     SURVEY_INFO_TIME_RX |
5845 			     SURVEY_INFO_TIME_TX);
5846 exit:
5847 	spin_unlock_bh(&ar->data_lock);
5848 	complete(&ar->bss_survey_done);
5849 	return 0;
5850 }
5851 
5852 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5853 {
5854 	if (ar->hw_params.hw_ops->set_coverage_class) {
5855 		spin_lock_bh(&ar->data_lock);
5856 
5857 		/* This call only ensures that the modified coverage class
5858 		 * persists in case the firmware sets the registers back to
5859 		 * their default value. So calling it is only necessary if the
5860 		 * coverage class has a non-zero value.
5861 		 */
5862 		if (ar->fw_coverage.coverage_class)
5863 			queue_work(ar->workqueue, &ar->set_coverage_class_work);
5864 
5865 		spin_unlock_bh(&ar->data_lock);
5866 	}
5867 }
5868 
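/* Main WMI event dispatcher for the "main" firmware branch. The mgmt rx
 * and service ready handlers take over ownership of the skb; every other
 * event is freed here once handled.
 */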
5869 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5870 {
5871 	struct wmi_cmd_hdr *cmd_hdr;
5872 	enum wmi_event_id id;
5873 
5874 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5875 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5876 
5877 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5878 		goto out;
5879 
5880 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5881 
5882 	switch (id) {
5883 	case WMI_MGMT_RX_EVENTID:
5884 		ath10k_wmi_event_mgmt_rx(ar, skb);
5885 		/* mgmt_rx() owns the skb now! */
5886 		return;
5887 	case WMI_SCAN_EVENTID:
5888 		ath10k_wmi_event_scan(ar, skb);
5889 		ath10k_wmi_queue_set_coverage_class_work(ar);
5890 		break;
5891 	case WMI_CHAN_INFO_EVENTID:
5892 		ath10k_wmi_event_chan_info(ar, skb);
5893 		break;
5894 	case WMI_ECHO_EVENTID:
5895 		ath10k_wmi_event_echo(ar, skb);
5896 		break;
5897 	case WMI_DEBUG_MESG_EVENTID:
5898 		ath10k_wmi_event_debug_mesg(ar, skb);
5899 		ath10k_wmi_queue_set_coverage_class_work(ar);
5900 		break;
5901 	case WMI_UPDATE_STATS_EVENTID:
5902 		ath10k_wmi_event_update_stats(ar, skb);
5903 		break;
5904 	case WMI_VDEV_START_RESP_EVENTID:
5905 		ath10k_wmi_event_vdev_start_resp(ar, skb);
5906 		ath10k_wmi_queue_set_coverage_class_work(ar);
5907 		break;
5908 	case WMI_VDEV_STOPPED_EVENTID:
5909 		ath10k_wmi_event_vdev_stopped(ar, skb);
5910 		ath10k_wmi_queue_set_coverage_class_work(ar);
5911 		break;
5912 	case WMI_PEER_STA_KICKOUT_EVENTID:
5913 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
5914 		break;
5915 	case WMI_HOST_SWBA_EVENTID:
5916 		ath10k_wmi_event_host_swba(ar, skb);
5917 		break;
5918 	case WMI_TBTTOFFSET_UPDATE_EVENTID:
5919 		ath10k_wmi_event_tbttoffset_update(ar, skb);
5920 		break;
5921 	case WMI_PHYERR_EVENTID:
5922 		ath10k_wmi_event_phyerr(ar, skb);
5923 		break;
5924 	case WMI_ROAM_EVENTID:
5925 		ath10k_wmi_event_roam(ar, skb);
5926 		ath10k_wmi_queue_set_coverage_class_work(ar);
5927 		break;
5928 	case WMI_PROFILE_MATCH:
5929 		ath10k_wmi_event_profile_match(ar, skb);
5930 		break;
5931 	case WMI_DEBUG_PRINT_EVENTID:
5932 		ath10k_wmi_event_debug_print(ar, skb);
5933 		ath10k_wmi_queue_set_coverage_class_work(ar);
5934 		break;
5935 	case WMI_PDEV_QVIT_EVENTID:
5936 		ath10k_wmi_event_pdev_qvit(ar, skb);
5937 		break;
5938 	case WMI_WLAN_PROFILE_DATA_EVENTID:
5939 		ath10k_wmi_event_wlan_profile_data(ar, skb);
5940 		break;
5941 	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5942 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
5943 		break;
5944 	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5945 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
5946 		break;
5947 	case WMI_RTT_ERROR_REPORT_EVENTID:
5948 		ath10k_wmi_event_rtt_error_report(ar, skb);
5949 		break;
5950 	case WMI_WOW_WAKEUP_HOST_EVENTID:
5951 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
5952 		break;
5953 	case WMI_DCS_INTERFERENCE_EVENTID:
5954 		ath10k_wmi_event_dcs_interference(ar, skb);
5955 		break;
5956 	case WMI_PDEV_TPC_CONFIG_EVENTID:
5957 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
5958 		break;
5959 	case WMI_PDEV_FTM_INTG_EVENTID:
5960 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5961 		break;
5962 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5963 		ath10k_wmi_event_gtk_offload_status(ar, skb);
5964 		break;
5965 	case WMI_GTK_REKEY_FAIL_EVENTID:
5966 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5967 		break;
5968 	case WMI_TX_DELBA_COMPLETE_EVENTID:
5969 		ath10k_wmi_event_delba_complete(ar, skb);
5970 		break;
5971 	case WMI_TX_ADDBA_COMPLETE_EVENTID:
5972 		ath10k_wmi_event_addba_complete(ar, skb);
5973 		break;
5974 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5975 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5976 		break;
5977 	case WMI_SERVICE_READY_EVENTID:
5978 		ath10k_wmi_event_service_ready(ar, skb);
5979 		return;
5980 	case WMI_READY_EVENTID:
5981 		ath10k_wmi_event_ready(ar, skb);
5982 		ath10k_wmi_queue_set_coverage_class_work(ar);
5983 		break;
5984 	case WMI_SERVICE_AVAILABLE_EVENTID:
5985 		ath10k_wmi_event_service_available(ar, skb);
5986 		break;
5987 	default:
5988 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
5989 		break;
5990 	}
5991 
5992 out:
5993 	dev_kfree_skb(skb);
5994 }
5995 
5996 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5997 {
5998 	struct wmi_cmd_hdr *cmd_hdr;
5999 	enum wmi_10x_event_id id;
6000 	bool consumed;
6001 
6002 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6003 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6004 
6005 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6006 		goto out;
6007 
6008 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6009 
6010 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6011 
6012 	/* The ready event must be handled normally even in UTF mode so that
6013 	 * we know the UTF firmware has booted; other WMI events are simply
6014 	 * passed through to testmode.
6015 	 */
6016 	if (consumed && id != WMI_10X_READY_EVENTID) {
6017 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6018 			   "wmi testmode consumed 0x%x\n", id);
6019 		goto out;
6020 	}
6021 
6022 	switch (id) {
6023 	case WMI_10X_MGMT_RX_EVENTID:
6024 		ath10k_wmi_event_mgmt_rx(ar, skb);
6025 		/* mgmt_rx() owns the skb now! */
6026 		return;
6027 	case WMI_10X_SCAN_EVENTID:
6028 		ath10k_wmi_event_scan(ar, skb);
6029 		ath10k_wmi_queue_set_coverage_class_work(ar);
6030 		break;
6031 	case WMI_10X_CHAN_INFO_EVENTID:
6032 		ath10k_wmi_event_chan_info(ar, skb);
6033 		break;
6034 	case WMI_10X_ECHO_EVENTID:
6035 		ath10k_wmi_event_echo(ar, skb);
6036 		break;
6037 	case WMI_10X_DEBUG_MESG_EVENTID:
6038 		ath10k_wmi_event_debug_mesg(ar, skb);
6039 		ath10k_wmi_queue_set_coverage_class_work(ar);
6040 		break;
6041 	case WMI_10X_UPDATE_STATS_EVENTID:
6042 		ath10k_wmi_event_update_stats(ar, skb);
6043 		break;
6044 	case WMI_10X_VDEV_START_RESP_EVENTID:
6045 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6046 		ath10k_wmi_queue_set_coverage_class_work(ar);
6047 		break;
6048 	case WMI_10X_VDEV_STOPPED_EVENTID:
6049 		ath10k_wmi_event_vdev_stopped(ar, skb);
6050 		ath10k_wmi_queue_set_coverage_class_work(ar);
6051 		break;
6052 	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
6053 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6054 		break;
6055 	case WMI_10X_HOST_SWBA_EVENTID:
6056 		ath10k_wmi_event_host_swba(ar, skb);
6057 		break;
6058 	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
6059 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6060 		break;
6061 	case WMI_10X_PHYERR_EVENTID:
6062 		ath10k_wmi_event_phyerr(ar, skb);
6063 		break;
6064 	case WMI_10X_ROAM_EVENTID:
6065 		ath10k_wmi_event_roam(ar, skb);
6066 		ath10k_wmi_queue_set_coverage_class_work(ar);
6067 		break;
6068 	case WMI_10X_PROFILE_MATCH:
6069 		ath10k_wmi_event_profile_match(ar, skb);
6070 		break;
6071 	case WMI_10X_DEBUG_PRINT_EVENTID:
6072 		ath10k_wmi_event_debug_print(ar, skb);
6073 		ath10k_wmi_queue_set_coverage_class_work(ar);
6074 		break;
6075 	case WMI_10X_PDEV_QVIT_EVENTID:
6076 		ath10k_wmi_event_pdev_qvit(ar, skb);
6077 		break;
6078 	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
6079 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6080 		break;
6081 	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
6082 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6083 		break;
6084 	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
6085 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6086 		break;
6087 	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
6088 		ath10k_wmi_event_rtt_error_report(ar, skb);
6089 		break;
6090 	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
6091 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6092 		break;
6093 	case WMI_10X_DCS_INTERFERENCE_EVENTID:
6094 		ath10k_wmi_event_dcs_interference(ar, skb);
6095 		break;
6096 	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6097 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6098 		break;
6099 	case WMI_10X_INST_RSSI_STATS_EVENTID:
6100 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6101 		break;
6102 	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6103 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6104 		break;
6105 	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6106 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6107 		break;
6108 	case WMI_10X_SERVICE_READY_EVENTID:
6109 		ath10k_wmi_event_service_ready(ar, skb);
6110 		return;
6111 	case WMI_10X_READY_EVENTID:
6112 		ath10k_wmi_event_ready(ar, skb);
6113 		ath10k_wmi_queue_set_coverage_class_work(ar);
6114 		break;
6115 	case WMI_10X_PDEV_UTF_EVENTID:
6116 		/* ignore utf events */
6117 		break;
6118 	default:
6119 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6120 		break;
6121 	}
6122 
6123 out:
6124 	dev_kfree_skb(skb);
6125 }
6126 
6127 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6128 {
6129 	struct wmi_cmd_hdr *cmd_hdr;
6130 	enum wmi_10_2_event_id id;
6131 	bool consumed;
6132 
6133 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6134 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6135 
6136 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6137 		goto out;
6138 
6139 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6140 
6141 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6142 
6143 	/* The ready event must be handled normally even in UTF mode so that
6144 	 * we know the UTF firmware has booted; other WMI events are simply
6145 	 * passed through to testmode.
6146 	 */
6147 	if (consumed && id != WMI_10_2_READY_EVENTID) {
6148 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6149 			   "wmi testmode consumed 0x%x\n", id);
6150 		goto out;
6151 	}
6152 
6153 	switch (id) {
6154 	case WMI_10_2_MGMT_RX_EVENTID:
6155 		ath10k_wmi_event_mgmt_rx(ar, skb);
6156 		/* mgmt_rx() owns the skb now! */
6157 		return;
6158 	case WMI_10_2_SCAN_EVENTID:
6159 		ath10k_wmi_event_scan(ar, skb);
6160 		ath10k_wmi_queue_set_coverage_class_work(ar);
6161 		break;
6162 	case WMI_10_2_CHAN_INFO_EVENTID:
6163 		ath10k_wmi_event_chan_info(ar, skb);
6164 		break;
6165 	case WMI_10_2_ECHO_EVENTID:
6166 		ath10k_wmi_event_echo(ar, skb);
6167 		break;
6168 	case WMI_10_2_DEBUG_MESG_EVENTID:
6169 		ath10k_wmi_event_debug_mesg(ar, skb);
6170 		ath10k_wmi_queue_set_coverage_class_work(ar);
6171 		break;
6172 	case WMI_10_2_UPDATE_STATS_EVENTID:
6173 		ath10k_wmi_event_update_stats(ar, skb);
6174 		break;
6175 	case WMI_10_2_VDEV_START_RESP_EVENTID:
6176 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6177 		ath10k_wmi_queue_set_coverage_class_work(ar);
6178 		break;
6179 	case WMI_10_2_VDEV_STOPPED_EVENTID:
6180 		ath10k_wmi_event_vdev_stopped(ar, skb);
6181 		ath10k_wmi_queue_set_coverage_class_work(ar);
6182 		break;
6183 	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6184 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6185 		break;
6186 	case WMI_10_2_HOST_SWBA_EVENTID:
6187 		ath10k_wmi_event_host_swba(ar, skb);
6188 		break;
6189 	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6190 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6191 		break;
6192 	case WMI_10_2_PHYERR_EVENTID:
6193 		ath10k_wmi_event_phyerr(ar, skb);
6194 		break;
6195 	case WMI_10_2_ROAM_EVENTID:
6196 		ath10k_wmi_event_roam(ar, skb);
6197 		ath10k_wmi_queue_set_coverage_class_work(ar);
6198 		break;
6199 	case WMI_10_2_PROFILE_MATCH:
6200 		ath10k_wmi_event_profile_match(ar, skb);
6201 		break;
6202 	case WMI_10_2_DEBUG_PRINT_EVENTID:
6203 		ath10k_wmi_event_debug_print(ar, skb);
6204 		ath10k_wmi_queue_set_coverage_class_work(ar);
6205 		break;
6206 	case WMI_10_2_PDEV_QVIT_EVENTID:
6207 		ath10k_wmi_event_pdev_qvit(ar, skb);
6208 		break;
6209 	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6210 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6211 		break;
6212 	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6213 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6214 		break;
6215 	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6216 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6217 		break;
6218 	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6219 		ath10k_wmi_event_rtt_error_report(ar, skb);
6220 		break;
6221 	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6222 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6223 		break;
6224 	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6225 		ath10k_wmi_event_dcs_interference(ar, skb);
6226 		break;
6227 	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6228 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6229 		break;
6230 	case WMI_10_2_INST_RSSI_STATS_EVENTID:
6231 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6232 		break;
6233 	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6234 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6235 		ath10k_wmi_queue_set_coverage_class_work(ar);
6236 		break;
6237 	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6238 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6239 		ath10k_wmi_queue_set_coverage_class_work(ar);
6240 		break;
6241 	case WMI_10_2_SERVICE_READY_EVENTID:
6242 		ath10k_wmi_event_service_ready(ar, skb);
6243 		return;
6244 	case WMI_10_2_READY_EVENTID:
6245 		ath10k_wmi_event_ready(ar, skb);
6246 		ath10k_wmi_queue_set_coverage_class_work(ar);
6247 		break;
6248 	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6249 		ath10k_wmi_event_temperature(ar, skb);
6250 		break;
6251 	case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6252 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6253 		break;
6254 	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6255 	case WMI_10_2_GPIO_INPUT_EVENTID:
6256 	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6257 	case WMI_10_2_GENERIC_BUFFER_EVENTID:
6258 	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6259 	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6260 	case WMI_10_2_WDS_PEER_EVENTID:
6261 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6262 			   "received event id %d not implemented\n", id);
6263 		break;
6264 	case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6265 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6266 		break;
6267 	default:
6268 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6269 		break;
6270 	}
6271 
6272 out:
6273 	dev_kfree_skb(skb);
6274 }
6275 
6276 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6277 {
6278 	struct wmi_cmd_hdr *cmd_hdr;
6279 	enum wmi_10_4_event_id id;
6280 	bool consumed;
6281 
6282 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6283 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6284 
6285 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6286 		goto out;
6287 
6288 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6289 
6290 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6291 
6292 	/* The ready event must be handled normally even in UTF mode so that
6293 	 * we know the UTF firmware has booted; other WMI events are simply
6294 	 * passed through to testmode.
6295 	 */
6296 	if (consumed && id != WMI_10_4_READY_EVENTID) {
6297 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6298 			   "wmi testmode consumed 0x%x\n", id);
6299 		goto out;
6300 	}
6301 
6302 	switch (id) {
6303 	case WMI_10_4_MGMT_RX_EVENTID:
6304 		ath10k_wmi_event_mgmt_rx(ar, skb);
6305 		/* mgmt_rx() owns the skb now! */
6306 		return;
6307 	case WMI_10_4_ECHO_EVENTID:
6308 		ath10k_wmi_event_echo(ar, skb);
6309 		break;
6310 	case WMI_10_4_DEBUG_MESG_EVENTID:
6311 		ath10k_wmi_event_debug_mesg(ar, skb);
6312 		ath10k_wmi_queue_set_coverage_class_work(ar);
6313 		break;
6314 	case WMI_10_4_SERVICE_READY_EVENTID:
6315 		ath10k_wmi_event_service_ready(ar, skb);
6316 		return;
6317 	case WMI_10_4_SCAN_EVENTID:
6318 		ath10k_wmi_event_scan(ar, skb);
6319 		ath10k_wmi_queue_set_coverage_class_work(ar);
6320 		break;
6321 	case WMI_10_4_CHAN_INFO_EVENTID:
6322 		ath10k_wmi_event_chan_info(ar, skb);
6323 		break;
6324 	case WMI_10_4_PHYERR_EVENTID:
6325 		ath10k_wmi_event_phyerr(ar, skb);
6326 		break;
6327 	case WMI_10_4_READY_EVENTID:
6328 		ath10k_wmi_event_ready(ar, skb);
6329 		ath10k_wmi_queue_set_coverage_class_work(ar);
6330 		break;
6331 	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6332 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6333 		break;
6334 	case WMI_10_4_ROAM_EVENTID:
6335 		ath10k_wmi_event_roam(ar, skb);
6336 		ath10k_wmi_queue_set_coverage_class_work(ar);
6337 		break;
6338 	case WMI_10_4_HOST_SWBA_EVENTID:
6339 		ath10k_wmi_event_host_swba(ar, skb);
6340 		break;
6341 	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6342 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6343 		break;
6344 	case WMI_10_4_DEBUG_PRINT_EVENTID:
6345 		ath10k_wmi_event_debug_print(ar, skb);
6346 		ath10k_wmi_queue_set_coverage_class_work(ar);
6347 		break;
6348 	case WMI_10_4_VDEV_START_RESP_EVENTID:
6349 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6350 		ath10k_wmi_queue_set_coverage_class_work(ar);
6351 		break;
6352 	case WMI_10_4_VDEV_STOPPED_EVENTID:
6353 		ath10k_wmi_event_vdev_stopped(ar, skb);
6354 		ath10k_wmi_queue_set_coverage_class_work(ar);
6355 		break;
6356 	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6357 	case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6358 	case WMI_10_4_WDS_PEER_EVENTID:
6359 	case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6360 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6361 			   "received event id %d not implemented\n", id);
6362 		break;
6363 	case WMI_10_4_UPDATE_STATS_EVENTID:
6364 		ath10k_wmi_event_update_stats(ar, skb);
6365 		break;
6366 	case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6367 		ath10k_wmi_event_temperature(ar, skb);
6368 		break;
6369 	case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6370 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6371 		break;
6372 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6373 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6374 		break;
6375 	case WMI_10_4_TDLS_PEER_EVENTID:
6376 		ath10k_wmi_handle_tdls_peer_event(ar, skb);
6377 		break;
6378 	case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6379 		ath10k_wmi_event_tpc_final_table(ar, skb);
6380 		break;
6381 	case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6382 		ath10k_wmi_event_dfs_status_check(ar, skb);
6383 		break;
6384 	case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6385 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6386 		break;
6387 	default:
6388 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6389 		break;
6390 	}
6391 
6392 out:
6393 	dev_kfree_skb(skb);
6394 }
6395 
6396 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6397 {
6398 	int ret;
6399 
6400 	ret = ath10k_wmi_rx(ar, skb);
6401 	if (ret)
6402 		ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6403 }
6404 
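/* Connect the WMI control service over HTC and store the resulting
 * endpoint id in ar->wmi.eid.
 */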
6405 int ath10k_wmi_connect(struct ath10k *ar)
6406 {
6407 	int status;
6408 	struct ath10k_htc_svc_conn_req conn_req;
6409 	struct ath10k_htc_svc_conn_resp conn_resp;
6410 
6411 	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6412 
6413 	memset(&conn_req, 0, sizeof(conn_req));
6414 	memset(&conn_resp, 0, sizeof(conn_resp));
6415 
6416 	/* these fields are the same for all service endpoints */
6417 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6418 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6419 	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6420 
6421 	/* connect to control service */
6422 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6423 
6424 	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6425 	if (status) {
6426 		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6427 			    status);
6428 		return status;
6429 	}
6430 
6431 	ar->wmi.eid = conn_resp.eid;
6432 	return 0;
6433 }
6434 
6435 static struct sk_buff *
6436 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6437 					const u8 macaddr[ETH_ALEN])
6438 {
6439 	struct wmi_pdev_set_base_macaddr_cmd *cmd;
6440 	struct sk_buff *skb;
6441 
6442 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6443 	if (!skb)
6444 		return ERR_PTR(-ENOMEM);
6445 
6446 	cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6447 	ether_addr_copy(cmd->mac_addr.addr, macaddr);
6448 
6449 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6450 		   "wmi pdev basemac %pM\n", macaddr);
6451 	return skb;
6452 }
6453 
6454 static struct sk_buff *
6455 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6456 			      u16 ctl2g, u16 ctl5g,
6457 			      enum wmi_dfs_region dfs_reg)
6458 {
6459 	struct wmi_pdev_set_regdomain_cmd *cmd;
6460 	struct sk_buff *skb;
6461 
6462 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6463 	if (!skb)
6464 		return ERR_PTR(-ENOMEM);
6465 
6466 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6467 	cmd->reg_domain = __cpu_to_le32(rd);
6468 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6469 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6470 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6471 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6472 
6473 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6474 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6475 		   rd, rd2g, rd5g, ctl2g, ctl5g);
6476 	return skb;
6477 }
6478 
6479 static struct sk_buff *
6480 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6481 				  rd5g, u16 ctl2g, u16 ctl5g,
6482 				  enum wmi_dfs_region dfs_reg)
6483 {
6484 	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6485 	struct sk_buff *skb;
6486 
6487 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6488 	if (!skb)
6489 		return ERR_PTR(-ENOMEM);
6490 
6491 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6492 	cmd->reg_domain = __cpu_to_le32(rd);
6493 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6494 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6495 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6496 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6497 	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6498 
6499 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6500 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6501 		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6502 	return skb;
6503 }
6504 
6505 static struct sk_buff *
6506 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6507 {
6508 	struct wmi_pdev_suspend_cmd *cmd;
6509 	struct sk_buff *skb;
6510 
6511 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6512 	if (!skb)
6513 		return ERR_PTR(-ENOMEM);
6514 
6515 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6516 	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6517 
6518 	return skb;
6519 }
6520 
6521 static struct sk_buff *
6522 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6523 {
6524 	struct sk_buff *skb;
6525 
6526 	skb = ath10k_wmi_alloc_skb(ar, 0);
6527 	if (!skb)
6528 		return ERR_PTR(-ENOMEM);
6529 
6530 	return skb;
6531 }
6532 
6533 static struct sk_buff *
6534 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6535 {
6536 	struct wmi_pdev_set_param_cmd *cmd;
6537 	struct sk_buff *skb;
6538 
6539 	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6540 		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6541 			    id);
6542 		return ERR_PTR(-EOPNOTSUPP);
6543 	}
6544 
6545 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6546 	if (!skb)
6547 		return ERR_PTR(-ENOMEM);
6548 
6549 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6550 	cmd->param_id    = __cpu_to_le32(id);
6551 	cmd->param_value = __cpu_to_le32(value);
6552 
6553 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6554 		   id, value);
6555 	return skb;
6556 }
6557 
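/* Copy the host memory chunks already allocated for the firmware
 * (address, length and request id) into the chunk list of an init
 * command buffer.
 */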
6558 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6559 				    struct wmi_host_mem_chunks *chunks)
6560 {
6561 	struct host_memory_chunk *chunk;
6562 	int i;
6563 
6564 	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6565 
6566 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6567 		chunk = &chunks->items[i];
6568 		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6569 		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6570 		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6571 
6572 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6573 			   "wmi chunk %d len %d requested, addr 0x%llx\n",
6574 			   i,
6575 			   ar->wmi.mem_chunks[i].len,
6576 			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6577 	}
6578 }
6579 
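/* Build the WMI init command for the main firmware: a resource
 * configuration based on compile-time TARGET_* limits followed by the
 * host memory chunk list.
 */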
6580 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6581 {
6582 	struct wmi_init_cmd *cmd;
6583 	struct sk_buff *buf;
6584 	struct wmi_resource_config config = {};
6585 	u32 val;
6586 
6587 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6588 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6589 	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6590 
6591 	config.num_offload_reorder_bufs =
6592 		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6593 
6594 	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6595 	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6596 	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6597 	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6598 	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6599 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6600 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6601 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6602 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6603 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6604 	config.scan_max_pending_reqs =
6605 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6606 
6607 	config.bmiss_offload_max_vdev =
6608 		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6609 
6610 	config.roam_offload_max_vdev =
6611 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6612 
6613 	config.roam_offload_max_ap_profiles =
6614 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6615 
6616 	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6617 	config.num_mcast_table_elems =
6618 		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6619 
6620 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6621 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6622 	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6623 	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6624 	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6625 
6626 	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6627 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6628 
6629 	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6630 
6631 	config.gtk_offload_max_vdev =
6632 		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6633 
6634 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6635 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6636 
6637 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6638 						   ar->wmi.num_mem_chunks));
6639 	if (!buf)
6640 		return ERR_PTR(-ENOMEM);
6641 
6642 	cmd = (struct wmi_init_cmd *)buf->data;
6643 
6644 	memcpy(&cmd->resource_config, &config, sizeof(config));
6645 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6646 
6647 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6648 	return buf;
6649 }
6650 
6651 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6652 {
6653 	struct wmi_init_cmd_10x *cmd;
6654 	struct sk_buff *buf;
6655 	struct wmi_resource_config_10x config = {};
6656 	u32 val;
6657 
6658 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6659 	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6660 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6661 	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6662 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6663 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6664 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6665 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6666 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6667 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6668 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6669 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6670 	config.scan_max_pending_reqs =
6671 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6672 
6673 	config.bmiss_offload_max_vdev =
6674 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6675 
6676 	config.roam_offload_max_vdev =
6677 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6678 
6679 	config.roam_offload_max_ap_profiles =
6680 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6681 
6682 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6683 	config.num_mcast_table_elems =
6684 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6685 
6686 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6687 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6688 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6689 	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6690 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6691 
6692 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6693 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6694 
6695 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6696 
6697 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6698 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6699 
6700 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6701 						   ar->wmi.num_mem_chunks));
6702 	if (!buf)
6703 		return ERR_PTR(-ENOMEM);
6704 
6705 	cmd = (struct wmi_init_cmd_10x *)buf->data;
6706 
6707 	memcpy(&cmd->resource_config, &config, sizeof(config));
6708 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6709 
6710 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6711 	return buf;
6712 }
6713 
6714 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6715 {
6716 	struct wmi_init_cmd_10_2 *cmd;
6717 	struct sk_buff *buf;
6718 	struct wmi_resource_config_10x config = {};
6719 	u32 val, features;
6720 
6721 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6722 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6723 
6724 	if (ath10k_peer_stats_enabled(ar)) {
6725 		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6726 		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6727 	} else {
6728 		config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6729 		config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6730 	}
6731 
6732 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6733 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6734 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6735 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6736 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6737 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6738 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6739 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6740 
6741 	config.scan_max_pending_reqs =
6742 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6743 
6744 	config.bmiss_offload_max_vdev =
6745 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6746 
6747 	config.roam_offload_max_vdev =
6748 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6749 
6750 	config.roam_offload_max_ap_profiles =
6751 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6752 
6753 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6754 	config.num_mcast_table_elems =
6755 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6756 
6757 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6758 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6759 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6760 	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6761 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6762 
6763 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6764 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6765 
6766 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6767 
6768 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6769 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6770 
6771 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6772 						   ar->wmi.num_mem_chunks));
6773 	if (!buf)
6774 		return ERR_PTR(-ENOMEM);
6775 
6776 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6777 
6778 	features = WMI_10_2_RX_BATCH_MODE;
6779 
6780 	if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6781 	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6782 		features |= WMI_10_2_COEX_GPIO;
6783 
6784 	if (ath10k_peer_stats_enabled(ar))
6785 		features |= WMI_10_2_PEER_STATS;
6786 
6787 	if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6788 		features |= WMI_10_2_BSS_CHAN_INFO;
6789 
6790 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
6791 
6792 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
6793 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6794 
6795 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6796 	return buf;
6797 }
6798 
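/* Build the WMI init command for 10.4 firmware. Peer, vdev and TID
 * counts come from values determined at runtime (set up while handling
 * the service ready event) rather than from compile-time constants
 * alone.
 */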
6799 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6800 {
6801 	struct wmi_init_cmd_10_4 *cmd;
6802 	struct sk_buff *buf;
6803 	struct wmi_resource_config_10_4 config = {};
6804 
6805 	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6806 	config.num_peers = __cpu_to_le32(ar->max_num_peers);
6807 	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6808 	config.num_tids = __cpu_to_le32(ar->num_tids);
6809 
6810 	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6811 	config.num_offload_reorder_buffs =
6812 			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6813 	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6814 	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6815 	config.tx_chain_mask  = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6816 	config.rx_chain_mask  = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6817 
6818 	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6819 	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6820 	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6821 	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6822 
6823 	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
6824 	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6825 	config.bmiss_offload_max_vdev =
6826 			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6827 	config.roam_offload_max_vdev  =
6828 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6829 	config.roam_offload_max_ap_profiles =
6830 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6831 	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6832 	config.num_mcast_table_elems =
6833 			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6834 
6835 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6836 	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6837 	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6838 	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6839 	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6840 
6841 	config.rx_skip_defrag_timeout_dup_detection_check =
6842 	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6843 
6844 	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6845 	config.gtk_offload_max_vdev =
6846 			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6847 	config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6848 	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6849 	config.max_peer_ext_stats =
6850 			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6851 	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6852 
6853 	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6854 	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6855 	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6856 	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6857 
6858 	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6859 	config.tt_support =
6860 			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6861 	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6862 	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6863 	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6864 
6865 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6866 						   ar->wmi.num_mem_chunks));
6867 	if (!buf)
6868 		return ERR_PTR(-ENOMEM);
6869 
6870 	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6871 	memcpy(&cmd->resource_config, &config, sizeof(config));
6872 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6873 
6874 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6875 	return buf;
6876 }
6877 
6878 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6879 {
6880 	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6881 		return -EINVAL;
6882 	if (arg->n_channels > ARRAY_SIZE(arg->channels))
6883 		return -EINVAL;
6884 	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6885 		return -EINVAL;
6886 	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6887 		return -EINVAL;
6888 
6889 	return 0;
6890 }
6891 
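/* Total size of the optional TLVs (IE data, channel list, SSID list and
 * BSSID list) appended after the common start scan fields.
 */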
6892 static size_t
6893 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6894 {
6895 	int len = 0;
6896 
6897 	if (arg->ie_len) {
6898 		len += sizeof(struct wmi_ie_data);
6899 		len += roundup(arg->ie_len, 4);
6900 	}
6901 
6902 	if (arg->n_channels) {
6903 		len += sizeof(struct wmi_chan_list);
6904 		len += sizeof(__le32) * arg->n_channels;
6905 	}
6906 
6907 	if (arg->n_ssids) {
6908 		len += sizeof(struct wmi_ssid_list);
6909 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
6910 	}
6911 
6912 	if (arg->n_bssids) {
6913 		len += sizeof(struct wmi_bssid_list);
6914 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6915 	}
6916 
6917 	return len;
6918 }
6919 
6920 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6921 				      const struct wmi_start_scan_arg *arg)
6922 {
6923 	u32 scan_id;
6924 	u32 scan_req_id;
6925 
6926 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
6927 	scan_id |= arg->scan_id;
6928 
6929 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6930 	scan_req_id |= arg->scan_req_id;
6931 
6932 	cmn->scan_id            = __cpu_to_le32(scan_id);
6933 	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
6934 	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
6935 	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
6936 	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6937 	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
6938 	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6939 	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
6940 	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
6941 	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
6942 	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6943 	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
6944 	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
6945 	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
6946 	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
6947 }
6948 
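/* Append the optional scan TLVs (channel list, SSID list, BSSID list
 * and IE data); the combined layout must fit the size computed by
 * ath10k_wmi_start_scan_tlvs_len().
 */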
6949 static void
6950 ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
6951 			       const struct wmi_start_scan_arg *arg)
6952 {
6953 	struct wmi_ie_data *ie;
6954 	struct wmi_chan_list *channels;
6955 	struct wmi_ssid_list *ssids;
6956 	struct wmi_bssid_list *bssids;
6957 	void *ptr = tlvs;
6958 	int i;
6959 
6960 	if (arg->n_channels) {
6961 		channels = ptr;
6962 		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6963 		channels->num_chan = __cpu_to_le32(arg->n_channels);
6964 
6965 		for (i = 0; i < arg->n_channels; i++)
6966 			channels->channel_list[i].freq =
6967 				__cpu_to_le16(arg->channels[i]);
6968 
6969 		ptr += sizeof(*channels);
6970 		ptr += sizeof(__le32) * arg->n_channels;
6971 	}
6972 
6973 	if (arg->n_ssids) {
6974 		ssids = ptr;
6975 		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6976 		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6977 
6978 		for (i = 0; i < arg->n_ssids; i++) {
6979 			ssids->ssids[i].ssid_len =
6980 				__cpu_to_le32(arg->ssids[i].len);
6981 			memcpy(&ssids->ssids[i].ssid,
6982 			       arg->ssids[i].ssid,
6983 			       arg->ssids[i].len);
6984 		}
6985 
6986 		ptr += sizeof(*ssids);
6987 		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6988 	}
6989 
6990 	if (arg->n_bssids) {
6991 		bssids = ptr;
6992 		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6993 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6994 
6995 		for (i = 0; i < arg->n_bssids; i++)
6996 			ether_addr_copy(bssids->bssid_list[i].addr,
6997 					arg->bssids[i].bssid);
6998 
6999 		ptr += sizeof(*bssids);
7000 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
7001 	}
7002 
7003 	if (arg->ie_len) {
7004 		ie = ptr;
7005 		ie->tag = __cpu_to_le32(WMI_IE_TAG);
7006 		ie->ie_len = __cpu_to_le32(arg->ie_len);
7007 		memcpy(ie->ie_data, arg->ie, arg->ie_len);
7008 
7009 		ptr += sizeof(*ie);
7010 		ptr += roundup(arg->ie_len, 4);
7011 	}
7012 }
7013 
7014 static struct sk_buff *
7015 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
7016 			     const struct wmi_start_scan_arg *arg)
7017 {
7018 	struct wmi_start_scan_cmd *cmd;
7019 	struct sk_buff *skb;
7020 	size_t len;
7021 	int ret;
7022 
7023 	ret = ath10k_wmi_start_scan_verify(arg);
7024 	if (ret)
7025 		return ERR_PTR(ret);
7026 
7027 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7028 	skb = ath10k_wmi_alloc_skb(ar, len);
7029 	if (!skb)
7030 		return ERR_PTR(-ENOMEM);
7031 
7032 	cmd = (struct wmi_start_scan_cmd *)skb->data;
7033 
7034 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7035 	ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7036 
7037 	cmd->burst_duration_ms = __cpu_to_le32(0);
7038 
7039 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
7040 	return skb;
7041 }
7042 
7043 static struct sk_buff *
7044 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
7045 				 const struct wmi_start_scan_arg *arg)
7046 {
7047 	struct wmi_10x_start_scan_cmd *cmd;
7048 	struct sk_buff *skb;
7049 	size_t len;
7050 	int ret;
7051 
7052 	ret = ath10k_wmi_start_scan_verify(arg);
7053 	if (ret)
7054 		return ERR_PTR(ret);
7055 
7056 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7057 	skb = ath10k_wmi_alloc_skb(ar, len);
7058 	if (!skb)
7059 		return ERR_PTR(-ENOMEM);
7060 
7061 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
7062 
7063 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7064 	ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7065 
7066 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
7067 	return skb;
7068 }
7069 
7070 void ath10k_wmi_start_scan_init(struct ath10k *ar,
7071 				struct wmi_start_scan_arg *arg)
7072 {
7073 	/* setup commonly used values */
7074 	arg->scan_req_id = 1;
7075 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
7076 	arg->dwell_time_active = 50;
7077 	arg->dwell_time_passive = 150;
7078 	arg->min_rest_time = 50;
7079 	arg->max_rest_time = 500;
7080 	arg->repeat_probe_time = 0;
7081 	arg->probe_spacing_time = 0;
7082 	arg->idle_time = 0;
7083 	arg->max_scan_time = 20000;
7084 	arg->probe_delay = 5;
7085 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7086 		| WMI_SCAN_EVENT_COMPLETED
7087 		| WMI_SCAN_EVENT_BSS_CHANNEL
7088 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
7089 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7090 		| WMI_SCAN_EVENT_DEQUEUED;
7091 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7092 	arg->n_bssids = 1;
7093 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7094 }
7095 
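/* Stop scan: scan and requestor IDs above 0xFFF are rejected, and the
 * WMI_HOST_SCAN_REQ_ID_PREFIX / WMI_HOST_SCAN_REQUESTOR_ID_PREFIX bits are
 * OR'd in before the IDs are sent to firmware.
 */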
7096 static struct sk_buff *
7097 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7098 			    const struct wmi_stop_scan_arg *arg)
7099 {
7100 	struct wmi_stop_scan_cmd *cmd;
7101 	struct sk_buff *skb;
7102 	u32 scan_id;
7103 	u32 req_id;
7104 
7105 	if (arg->req_id > 0xFFF)
7106 		return ERR_PTR(-EINVAL);
7107 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7108 		return ERR_PTR(-EINVAL);
7109 
7110 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7111 	if (!skb)
7112 		return ERR_PTR(-ENOMEM);
7113 
7114 	scan_id = arg->u.scan_id;
7115 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7116 
7117 	req_id = arg->req_id;
7118 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7119 
7120 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
7121 	cmd->req_type    = __cpu_to_le32(arg->req_type);
7122 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
7123 	cmd->scan_id     = __cpu_to_le32(scan_id);
7124 	cmd->scan_req_id = __cpu_to_le32(req_id);
7125 
7126 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7127 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7128 		   arg->req_id, arg->req_type, arg->u.scan_id);
7129 	return skb;
7130 }
7131 
7132 static struct sk_buff *
7133 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7134 			      enum wmi_vdev_type type,
7135 			      enum wmi_vdev_subtype subtype,
7136 			      const u8 macaddr[ETH_ALEN])
7137 {
7138 	struct wmi_vdev_create_cmd *cmd;
7139 	struct sk_buff *skb;
7140 
7141 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7142 	if (!skb)
7143 		return ERR_PTR(-ENOMEM);
7144 
7145 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
7146 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
7147 	cmd->vdev_type    = __cpu_to_le32(type);
7148 	cmd->vdev_subtype = __cpu_to_le32(subtype);
7149 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7150 
7151 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7152 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7153 		   vdev_id, type, subtype, macaddr);
7154 	return skb;
7155 }
7156 
7157 static struct sk_buff *
7158 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7159 {
7160 	struct wmi_vdev_delete_cmd *cmd;
7161 	struct sk_buff *skb;
7162 
7163 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7164 	if (!skb)
7165 		return ERR_PTR(-ENOMEM);
7166 
7167 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7168 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7169 
7170 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7171 		   "WMI vdev delete id %d\n", vdev_id);
7172 	return skb;
7173 }
7174 
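/* A single generator serves both vdev start and restart; only the debug
 * string differs. The flags word is derived from hidden_ssid/pmf_enabled and
 * the target channel is encoded via ath10k_wmi_put_wmi_channel().
 */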
7175 static struct sk_buff *
7176 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7177 			     const struct wmi_vdev_start_request_arg *arg,
7178 			     bool restart)
7179 {
7180 	struct wmi_vdev_start_request_cmd *cmd;
7181 	struct sk_buff *skb;
7182 	const char *cmdname;
7183 	u32 flags = 0;
7184 
7185 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7186 		return ERR_PTR(-EINVAL);
7187 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7188 		return ERR_PTR(-EINVAL);
7189 
7190 	if (restart)
7191 		cmdname = "restart";
7192 	else
7193 		cmdname = "start";
7194 
7195 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7196 	if (!skb)
7197 		return ERR_PTR(-ENOMEM);
7198 
7199 	if (arg->hidden_ssid)
7200 		flags |= WMI_VDEV_START_HIDDEN_SSID;
7201 	if (arg->pmf_enabled)
7202 		flags |= WMI_VDEV_START_PMF_ENABLED;
7203 
7204 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7205 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
7206 	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
7207 	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7208 	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
7209 	cmd->flags           = __cpu_to_le32(flags);
7210 	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
7211 	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
7212 
7213 	if (arg->ssid) {
7214 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7215 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7216 	}
7217 
7218 	ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
7219 
7220 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7221 		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7222 		   cmdname, arg->vdev_id,
7223 		   flags, arg->channel.freq, arg->channel.mode,
7224 		   cmd->chan.flags, arg->channel.max_power);
7225 
7226 	return skb;
7227 }
7228 
7229 static struct sk_buff *
7230 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7231 {
7232 	struct wmi_vdev_stop_cmd *cmd;
7233 	struct sk_buff *skb;
7234 
7235 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7236 	if (!skb)
7237 		return ERR_PTR(-ENOMEM);
7238 
7239 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7240 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7241 
7242 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7243 	return skb;
7244 }
7245 
7246 static struct sk_buff *
7247 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7248 			  const u8 *bssid)
7249 {
7250 	struct wmi_vdev_up_cmd *cmd;
7251 	struct sk_buff *skb;
7252 
7253 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7254 	if (!skb)
7255 		return ERR_PTR(-ENOMEM);
7256 
7257 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
7258 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
7259 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
7260 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7261 
7262 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7263 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7264 		   vdev_id, aid, bssid);
7265 	return skb;
7266 }
7267 
7268 static struct sk_buff *
7269 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7270 {
7271 	struct wmi_vdev_down_cmd *cmd;
7272 	struct sk_buff *skb;
7273 
7274 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7275 	if (!skb)
7276 		return ERR_PTR(-ENOMEM);
7277 
7278 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
7279 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7280 
7281 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7282 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
7283 	return skb;
7284 }
7285 
7286 static struct sk_buff *
7287 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7288 				 u32 param_id, u32 param_value)
7289 {
7290 	struct wmi_vdev_set_param_cmd *cmd;
7291 	struct sk_buff *skb;
7292 
7293 	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7294 		ath10k_dbg(ar, ATH10K_DBG_WMI,
7295 			   "vdev param %d not supported by firmware\n",
7296 			    param_id);
7297 		return ERR_PTR(-EOPNOTSUPP);
7298 	}
7299 
7300 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7301 	if (!skb)
7302 		return ERR_PTR(-ENOMEM);
7303 
7304 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7305 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7306 	cmd->param_id    = __cpu_to_le32(param_id);
7307 	cmd->param_value = __cpu_to_le32(param_value);
7308 
7309 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7310 		   "wmi vdev id 0x%x set param %d value %d\n",
7311 		   vdev_id, param_id, param_value);
7312 	return skb;
7313 }
7314 
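/* Install key: a WMI_CIPHER_NONE key must not carry key data and any other
 * cipher must; the key material is appended right after the fixed command
 * header (the skb is sized sizeof(*cmd) + key_len).
 */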
7315 static struct sk_buff *
7316 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7317 				   const struct wmi_vdev_install_key_arg *arg)
7318 {
7319 	struct wmi_vdev_install_key_cmd *cmd;
7320 	struct sk_buff *skb;
7321 
7322 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7323 		return ERR_PTR(-EINVAL);
7324 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7325 		return ERR_PTR(-EINVAL);
7326 
7327 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7328 	if (!skb)
7329 		return ERR_PTR(-ENOMEM);
7330 
7331 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7332 	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
7333 	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
7334 	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
7335 	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
7336 	cmd->key_len       = __cpu_to_le32(arg->key_len);
7337 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7338 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7339 
7340 	if (arg->macaddr)
7341 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7342 	if (arg->key_data)
7343 		memcpy(cmd->key_data, arg->key_data, arg->key_len);
7344 
7345 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7346 		   "wmi vdev install key idx %d cipher %d len %d\n",
7347 		   arg->key_idx, arg->key_cipher, arg->key_len);
7348 	return skb;
7349 }
7350 
7351 static struct sk_buff *
7352 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7353 				     const struct wmi_vdev_spectral_conf_arg *arg)
7354 {
7355 	struct wmi_vdev_spectral_conf_cmd *cmd;
7356 	struct sk_buff *skb;
7357 
7358 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7359 	if (!skb)
7360 		return ERR_PTR(-ENOMEM);
7361 
7362 	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7363 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7364 	cmd->scan_count = __cpu_to_le32(arg->scan_count);
7365 	cmd->scan_period = __cpu_to_le32(arg->scan_period);
7366 	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7367 	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7368 	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7369 	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7370 	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7371 	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7372 	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7373 	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7374 	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7375 	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7376 	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7377 	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7378 	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7379 	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7380 	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7381 	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7382 
7383 	return skb;
7384 }
7385 
7386 static struct sk_buff *
7387 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7388 				       u32 trigger, u32 enable)
7389 {
7390 	struct wmi_vdev_spectral_enable_cmd *cmd;
7391 	struct sk_buff *skb;
7392 
7393 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7394 	if (!skb)
7395 		return ERR_PTR(-ENOMEM);
7396 
7397 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7398 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7399 	cmd->trigger_cmd = __cpu_to_le32(trigger);
7400 	cmd->enable_cmd = __cpu_to_le32(enable);
7401 
7402 	return skb;
7403 }
7404 
7405 static struct sk_buff *
7406 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7407 			      const u8 peer_addr[ETH_ALEN],
7408 			      enum wmi_peer_type peer_type)
7409 {
7410 	struct wmi_peer_create_cmd *cmd;
7411 	struct sk_buff *skb;
7412 
7413 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7414 	if (!skb)
7415 		return ERR_PTR(-ENOMEM);
7416 
7417 	cmd = (struct wmi_peer_create_cmd *)skb->data;
7418 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7419 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7420 	cmd->peer_type = __cpu_to_le32(peer_type);
7421 
7422 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7423 		   "wmi peer create vdev_id %d peer_addr %pM\n",
7424 		   vdev_id, peer_addr);
7425 	return skb;
7426 }
7427 
7428 static struct sk_buff *
7429 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7430 			      const u8 peer_addr[ETH_ALEN])
7431 {
7432 	struct wmi_peer_delete_cmd *cmd;
7433 	struct sk_buff *skb;
7434 
7435 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7436 	if (!skb)
7437 		return ERR_PTR(-ENOMEM);
7438 
7439 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
7440 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7441 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7442 
7443 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7444 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
7445 		   vdev_id, peer_addr);
7446 	return skb;
7447 }
7448 
7449 static struct sk_buff *
7450 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7451 			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7452 {
7453 	struct wmi_peer_flush_tids_cmd *cmd;
7454 	struct sk_buff *skb;
7455 
7456 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7457 	if (!skb)
7458 		return ERR_PTR(-ENOMEM);
7459 
7460 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7461 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
7462 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7463 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7464 
7465 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7466 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7467 		   vdev_id, peer_addr, tid_bitmap);
7468 	return skb;
7469 }
7470 
7471 static struct sk_buff *
7472 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7473 				 const u8 *peer_addr,
7474 				 enum wmi_peer_param param_id,
7475 				 u32 param_value)
7476 {
7477 	struct wmi_peer_set_param_cmd *cmd;
7478 	struct sk_buff *skb;
7479 
7480 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7481 	if (!skb)
7482 		return ERR_PTR(-ENOMEM);
7483 
7484 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7485 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7486 	cmd->param_id    = __cpu_to_le32(param_id);
7487 	cmd->param_value = __cpu_to_le32(param_value);
7488 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7489 
7490 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7491 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
7492 		   vdev_id, peer_addr, param_id, param_value);
7493 	return skb;
7494 }
7495 
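/* GPIO helpers: program a firmware-controlled GPIO line (input mode, pull
 * type, interrupt mode) and set its output level.
 */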
7496 static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
7497 						     u32 gpio_num, u32 input,
7498 						     u32 pull_type, u32 intr_mode)
7499 {
7500 	struct wmi_gpio_config_cmd *cmd;
7501 	struct sk_buff *skb;
7502 
7503 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7504 	if (!skb)
7505 		return ERR_PTR(-ENOMEM);
7506 
7507 	cmd = (struct wmi_gpio_config_cmd *)skb->data;
7508 	cmd->pull_type = __cpu_to_le32(pull_type);
7509 	cmd->gpio_num = __cpu_to_le32(gpio_num);
7510 	cmd->input = __cpu_to_le32(input);
7511 	cmd->intr_mode = __cpu_to_le32(intr_mode);
7512 
7513 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
7514 		   gpio_num, input, pull_type, intr_mode);
7515 
7516 	return skb;
7517 }
7518 
7519 static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
7520 						     u32 gpio_num, u32 set)
7521 {
7522 	struct wmi_gpio_output_cmd *cmd;
7523 	struct sk_buff *skb;
7524 
7525 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7526 	if (!skb)
7527 		return ERR_PTR(-ENOMEM);
7528 
7529 	cmd = (struct wmi_gpio_output_cmd *)skb->data;
7530 	cmd->gpio_num = __cpu_to_le32(gpio_num);
7531 	cmd->set = __cpu_to_le32(set);
7532 
7533 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
7534 		   gpio_num, set);
7535 
7536 	return skb;
7537 }
7538 
7539 static struct sk_buff *
7540 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7541 			     enum wmi_sta_ps_mode psmode)
7542 {
7543 	struct wmi_sta_powersave_mode_cmd *cmd;
7544 	struct sk_buff *skb;
7545 
7546 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7547 	if (!skb)
7548 		return ERR_PTR(-ENOMEM);
7549 
7550 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7551 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7552 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
7553 
7554 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7555 		   "wmi set powersave id 0x%x mode %d\n",
7556 		   vdev_id, psmode);
7557 	return skb;
7558 }
7559 
7560 static struct sk_buff *
7561 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7562 			     enum wmi_sta_powersave_param param_id,
7563 			     u32 value)
7564 {
7565 	struct wmi_sta_powersave_param_cmd *cmd;
7566 	struct sk_buff *skb;
7567 
7568 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7569 	if (!skb)
7570 		return ERR_PTR(-ENOMEM);
7571 
7572 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7573 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7574 	cmd->param_id    = __cpu_to_le32(param_id);
7575 	cmd->param_value = __cpu_to_le32(value);
7576 
7577 	ath10k_dbg(ar, ATH10K_DBG_STA,
7578 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7579 		   vdev_id, param_id, value);
7580 	return skb;
7581 }
7582 
7583 static struct sk_buff *
7584 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7585 			    enum wmi_ap_ps_peer_param param_id, u32 value)
7586 {
7587 	struct wmi_ap_ps_peer_cmd *cmd;
7588 	struct sk_buff *skb;
7589 
7590 	if (!mac)
7591 		return ERR_PTR(-EINVAL);
7592 
7593 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7594 	if (!skb)
7595 		return ERR_PTR(-ENOMEM);
7596 
7597 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7598 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7599 	cmd->param_id = __cpu_to_le32(param_id);
7600 	cmd->param_value = __cpu_to_le32(value);
7601 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
7602 
7603 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7604 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7605 		   vdev_id, param_id, value, mac);
7606 	return skb;
7607 }
7608 
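/* The channel list command uses struct_size() to size the flexible
 * chan_info[] array; each entry is encoded with ath10k_wmi_put_wmi_channel().
 */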
7609 static struct sk_buff *
7610 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7611 				 const struct wmi_scan_chan_list_arg *arg)
7612 {
7613 	struct wmi_scan_chan_list_cmd *cmd;
7614 	struct sk_buff *skb;
7615 	struct wmi_channel_arg *ch;
7616 	struct wmi_channel *ci;
7617 	int i;
7618 
7619 	skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
7620 	if (!skb)
7621 		return ERR_PTR(-ENOMEM);
7622 
7623 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7624 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7625 
7626 	for (i = 0; i < arg->n_channels; i++) {
7627 		ch = &arg->channels[i];
7628 		ci = &cmd->chan_info[i];
7629 
7630 		ath10k_wmi_put_wmi_channel(ar, ci, ch);
7631 	}
7632 
7633 	return skb;
7634 }
7635 
7636 static void
7637 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7638 			   const struct wmi_peer_assoc_complete_arg *arg)
7639 {
7640 	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7641 
7642 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
7643 	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7644 	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
7645 	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
7646 	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
7647 	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7648 	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
7649 	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
7650 	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
7651 	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
7652 	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
7653 	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
7654 	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
7655 
7656 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7657 
7658 	cmd->peer_legacy_rates.num_rates =
7659 		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
7660 	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7661 	       arg->peer_legacy_rates.num_rates);
7662 
7663 	cmd->peer_ht_rates.num_rates =
7664 		__cpu_to_le32(arg->peer_ht_rates.num_rates);
7665 	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7666 	       arg->peer_ht_rates.num_rates);
7667 
7668 	cmd->peer_vht_rates.rx_max_rate =
7669 		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7670 	cmd->peer_vht_rates.rx_mcs_set =
7671 		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7672 	cmd->peer_vht_rates.tx_max_rate =
7673 		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7674 	cmd->peer_vht_rates.tx_mcs_set =
7675 		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7676 }
7677 
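/* The firmware-branch specific peer_assoc fills below layer on the common
 * fill above: main zeroes peer_ht_info, 10.1 uses the common layout as-is,
 * 10.2 adds info0 with max MCS/NSS and 10.4 additionally carries
 * peer_bw_rxnss_override.
 */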
7678 static void
7679 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7680 				const struct wmi_peer_assoc_complete_arg *arg)
7681 {
7682 	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7683 
7684 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7685 	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7686 }
7687 
7688 static void
7689 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7690 				const struct wmi_peer_assoc_complete_arg *arg)
7691 {
7692 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7693 }
7694 
7695 static void
7696 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7697 				const struct wmi_peer_assoc_complete_arg *arg)
7698 {
7699 	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7700 	int max_mcs, max_nss;
7701 	u32 info0;
7702 
7703 	/* TODO: Is using max values okay with firmware? */
7704 	max_mcs = 0xf;
7705 	max_nss = 0xf;
7706 
7707 	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7708 		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7709 
7710 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7711 	cmd->info0 = __cpu_to_le32(info0);
7712 }
7713 
7714 static void
7715 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7716 				const struct wmi_peer_assoc_complete_arg *arg)
7717 {
7718 	struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7719 
7720 	ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7721 	cmd->peer_bw_rxnss_override =
7722 		__cpu_to_le32(arg->peer_bw_rxnss_override);
7723 }
7724 
7725 static int
7726 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7727 {
7728 	if (arg->peer_mpdu_density > 16)
7729 		return -EINVAL;
7730 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7731 		return -EINVAL;
7732 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7733 		return -EINVAL;
7734 
7735 	return 0;
7736 }
7737 
7738 static struct sk_buff *
7739 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7740 			     const struct wmi_peer_assoc_complete_arg *arg)
7741 {
7742 	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7743 	struct sk_buff *skb;
7744 	int ret;
7745 
7746 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7747 	if (ret)
7748 		return ERR_PTR(ret);
7749 
7750 	skb = ath10k_wmi_alloc_skb(ar, len);
7751 	if (!skb)
7752 		return ERR_PTR(-ENOMEM);
7753 
7754 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7755 
7756 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7757 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7758 		   arg->vdev_id, arg->addr,
7759 		   arg->peer_reassoc ? "reassociate" : "new");
7760 	return skb;
7761 }
7762 
7763 static struct sk_buff *
7764 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7765 				  const struct wmi_peer_assoc_complete_arg *arg)
7766 {
7767 	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7768 	struct sk_buff *skb;
7769 	int ret;
7770 
7771 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7772 	if (ret)
7773 		return ERR_PTR(ret);
7774 
7775 	skb = ath10k_wmi_alloc_skb(ar, len);
7776 	if (!skb)
7777 		return ERR_PTR(-ENOMEM);
7778 
7779 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7780 
7781 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7782 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7783 		   arg->vdev_id, arg->addr,
7784 		   arg->peer_reassoc ? "reassociate" : "new");
7785 	return skb;
7786 }
7787 
7788 static struct sk_buff *
7789 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7790 				  const struct wmi_peer_assoc_complete_arg *arg)
7791 {
7792 	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7793 	struct sk_buff *skb;
7794 	int ret;
7795 
7796 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7797 	if (ret)
7798 		return ERR_PTR(ret);
7799 
7800 	skb = ath10k_wmi_alloc_skb(ar, len);
7801 	if (!skb)
7802 		return ERR_PTR(-ENOMEM);
7803 
7804 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7805 
7806 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7807 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7808 		   arg->vdev_id, arg->addr,
7809 		   arg->peer_reassoc ? "reassociate" : "new");
7810 	return skb;
7811 }
7812 
7813 static struct sk_buff *
7814 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7815 				  const struct wmi_peer_assoc_complete_arg *arg)
7816 {
7817 	size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7818 	struct sk_buff *skb;
7819 	int ret;
7820 
7821 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7822 	if (ret)
7823 		return ERR_PTR(ret);
7824 
7825 	skb = ath10k_wmi_alloc_skb(ar, len);
7826 	if (!skb)
7827 		return ERR_PTR(-ENOMEM);
7828 
7829 	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7830 
7831 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7832 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7833 		   arg->vdev_id, arg->addr,
7834 		   arg->peer_reassoc ? "reassociate" : "new");
7835 	return skb;
7836 }
7837 
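/* The 10.2 temperature query carries no payload at all; the bss chan info
 * request carries only the survey type.
 */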
7838 static struct sk_buff *
7839 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7840 {
7841 	struct sk_buff *skb;
7842 
7843 	skb = ath10k_wmi_alloc_skb(ar, 0);
7844 	if (!skb)
7845 		return ERR_PTR(-ENOMEM);
7846 
7847 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7848 	return skb;
7849 }
7850 
7851 static struct sk_buff *
7852 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7853 					  enum wmi_bss_survey_req_type type)
7854 {
7855 	struct wmi_pdev_chan_info_req_cmd *cmd;
7856 	struct sk_buff *skb;
7857 
7858 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7859 	if (!skb)
7860 		return ERR_PTR(-ENOMEM);
7861 
7862 	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7863 	cmd->type = __cpu_to_le32(type);
7864 
7865 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7866 		   "wmi pdev bss info request type %d\n", type);
7867 
7868 	return skb;
7869 }
7870 
7871 /* This function assumes the beacon is already DMA mapped */
7872 static struct sk_buff *
7873 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7874 			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7875 			     bool deliver_cab)
7876 {
7877 	struct wmi_bcn_tx_ref_cmd *cmd;
7878 	struct sk_buff *skb;
7879 	struct ieee80211_hdr *hdr;
7880 	u16 fc;
7881 
7882 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7883 	if (!skb)
7884 		return ERR_PTR(-ENOMEM);
7885 
7886 	hdr = (struct ieee80211_hdr *)bcn;
7887 	fc = le16_to_cpu(hdr->frame_control);
7888 
7889 	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7890 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7891 	cmd->data_len = __cpu_to_le32(bcn_len);
7892 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7893 	cmd->msdu_id = 0;
7894 	cmd->frame_control = __cpu_to_le32(fc);
7895 	cmd->flags = 0;
7896 	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7897 
7898 	if (dtim_zero)
7899 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7900 
7901 	if (deliver_cab)
7902 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7903 
7904 	return skb;
7905 }
7906 
7907 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7908 			      const struct wmi_wmm_params_arg *arg)
7909 {
7910 	params->cwmin  = __cpu_to_le32(arg->cwmin);
7911 	params->cwmax  = __cpu_to_le32(arg->cwmax);
7912 	params->aifs   = __cpu_to_le32(arg->aifs);
7913 	params->txop   = __cpu_to_le32(arg->txop);
7914 	params->acm    = __cpu_to_le32(arg->acm);
7915 	params->no_ack = __cpu_to_le32(arg->no_ack);
7916 }
7917 
7918 static struct sk_buff *
7919 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7920 			       const struct wmi_wmm_params_all_arg *arg)
7921 {
7922 	struct wmi_pdev_set_wmm_params *cmd;
7923 	struct sk_buff *skb;
7924 
7925 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7926 	if (!skb)
7927 		return ERR_PTR(-ENOMEM);
7928 
7929 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7930 	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7931 	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7932 	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7933 	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7934 
7935 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7936 	return skb;
7937 }
7938 
7939 static struct sk_buff *
7940 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7941 {
7942 	struct wmi_request_stats_cmd *cmd;
7943 	struct sk_buff *skb;
7944 
7945 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7946 	if (!skb)
7947 		return ERR_PTR(-ENOMEM);
7948 
7949 	cmd = (struct wmi_request_stats_cmd *)skb->data;
7950 	cmd->stats_id = __cpu_to_le32(stats_mask);
7951 
7952 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7953 		   stats_mask);
7954 	return skb;
7955 }
7956 
7957 static struct sk_buff *
7958 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7959 				enum wmi_force_fw_hang_type type, u32 delay_ms)
7960 {
7961 	struct wmi_force_fw_hang_cmd *cmd;
7962 	struct sk_buff *skb;
7963 
7964 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7965 	if (!skb)
7966 		return ERR_PTR(-ENOMEM);
7967 
7968 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7969 	cmd->type = __cpu_to_le32(type);
7970 	cmd->delay_ms = __cpu_to_le32(delay_ms);
7971 
7972 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7973 		   type, delay_ms);
7974 	return skb;
7975 }
7976 
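/* Debug log configuration: a zero module_enable restores the defaults
 * (all modules enabled at WARN level). The 10.4 variant below is identical
 * except that the module masks are 64 bit.
 */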
7977 static struct sk_buff *
7978 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7979 			     u32 log_level)
7980 {
7981 	struct wmi_dbglog_cfg_cmd *cmd;
7982 	struct sk_buff *skb;
7983 	u32 cfg;
7984 
7985 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7986 	if (!skb)
7987 		return ERR_PTR(-ENOMEM);
7988 
7989 	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7990 
7991 	if (module_enable) {
7992 		cfg = SM(log_level,
7993 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7994 	} else {
7995 		/* set back defaults, all modules with WARN level */
7996 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7997 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7998 		module_enable = ~0;
7999 	}
8000 
8001 	cmd->module_enable = __cpu_to_le32(module_enable);
8002 	cmd->module_valid = __cpu_to_le32(~0);
8003 	cmd->config_enable = __cpu_to_le32(cfg);
8004 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
8005 
8006 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8007 		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
8008 		   __le32_to_cpu(cmd->module_enable),
8009 		   __le32_to_cpu(cmd->module_valid),
8010 		   __le32_to_cpu(cmd->config_enable),
8011 		   __le32_to_cpu(cmd->config_valid));
8012 	return skb;
8013 }
8014 
8015 static struct sk_buff *
8016 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
8017 				  u32 log_level)
8018 {
8019 	struct wmi_10_4_dbglog_cfg_cmd *cmd;
8020 	struct sk_buff *skb;
8021 	u32 cfg;
8022 
8023 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8024 	if (!skb)
8025 		return ERR_PTR(-ENOMEM);
8026 
8027 	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
8028 
8029 	if (module_enable) {
8030 		cfg = SM(log_level,
8031 			 ATH10K_DBGLOG_CFG_LOG_LVL);
8032 	} else {
8033 		/* set back defaults, all modules with WARN level */
8034 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
8035 			 ATH10K_DBGLOG_CFG_LOG_LVL);
8036 		module_enable = ~0;
8037 	}
8038 
8039 	cmd->module_enable = __cpu_to_le64(module_enable);
8040 	cmd->module_valid = __cpu_to_le64(~0);
8041 	cmd->config_enable = __cpu_to_le32(cfg);
8042 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
8043 
8044 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8045 		   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
8046 		   __le64_to_cpu(cmd->module_enable),
8047 		   __le64_to_cpu(cmd->module_valid),
8048 		   __le32_to_cpu(cmd->config_enable),
8049 		   __le32_to_cpu(cmd->config_valid));
8050 	return skb;
8051 }
8052 
8053 static struct sk_buff *
8054 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
8055 {
8056 	struct wmi_pdev_pktlog_enable_cmd *cmd;
8057 	struct sk_buff *skb;
8058 
8059 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8060 	if (!skb)
8061 		return ERR_PTR(-ENOMEM);
8062 
8063 	ev_bitmap &= ATH10K_PKTLOG_ANY;
8064 
8065 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
8066 	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
8067 
8068 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
8069 		   ev_bitmap);
8070 	return skb;
8071 }
8072 
8073 static struct sk_buff *
8074 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
8075 {
8076 	struct sk_buff *skb;
8077 
8078 	skb = ath10k_wmi_alloc_skb(ar, 0);
8079 	if (!skb)
8080 		return ERR_PTR(-ENOMEM);
8081 
8082 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
8083 	return skb;
8084 }
8085 
8086 static struct sk_buff *
8087 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
8088 				      u32 duration, u32 next_offset,
8089 				      u32 enabled)
8090 {
8091 	struct wmi_pdev_set_quiet_cmd *cmd;
8092 	struct sk_buff *skb;
8093 
8094 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8095 	if (!skb)
8096 		return ERR_PTR(-ENOMEM);
8097 
8098 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
8099 	cmd->period = __cpu_to_le32(period);
8100 	cmd->duration = __cpu_to_le32(duration);
8101 	cmd->next_start = __cpu_to_le32(next_offset);
8102 	cmd->enabled = __cpu_to_le32(enabled);
8103 
8104 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8105 		   "wmi quiet param: period %u duration %u enabled %d\n",
8106 		   period, duration, enabled);
8107 	return skb;
8108 }
8109 
8110 static struct sk_buff *
8111 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
8112 				   const u8 *mac)
8113 {
8114 	struct wmi_addba_clear_resp_cmd *cmd;
8115 	struct sk_buff *skb;
8116 
8117 	if (!mac)
8118 		return ERR_PTR(-EINVAL);
8119 
8120 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8121 	if (!skb)
8122 		return ERR_PTR(-ENOMEM);
8123 
8124 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
8125 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8126 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8127 
8128 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8129 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
8130 		   vdev_id, mac);
8131 	return skb;
8132 }
8133 
8134 static struct sk_buff *
8135 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8136 			     u32 tid, u32 buf_size)
8137 {
8138 	struct wmi_addba_send_cmd *cmd;
8139 	struct sk_buff *skb;
8140 
8141 	if (!mac)
8142 		return ERR_PTR(-EINVAL);
8143 
8144 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8145 	if (!skb)
8146 		return ERR_PTR(-ENOMEM);
8147 
8148 	cmd = (struct wmi_addba_send_cmd *)skb->data;
8149 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8150 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8151 	cmd->tid = __cpu_to_le32(tid);
8152 	cmd->buffersize = __cpu_to_le32(buf_size);
8153 
8154 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8155 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
8156 		   vdev_id, mac, tid, buf_size);
8157 	return skb;
8158 }
8159 
8160 static struct sk_buff *
8161 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8162 				 u32 tid, u32 status)
8163 {
8164 	struct wmi_addba_setresponse_cmd *cmd;
8165 	struct sk_buff *skb;
8166 
8167 	if (!mac)
8168 		return ERR_PTR(-EINVAL);
8169 
8170 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8171 	if (!skb)
8172 		return ERR_PTR(-ENOMEM);
8173 
8174 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
8175 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8176 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8177 	cmd->tid = __cpu_to_le32(tid);
8178 	cmd->statuscode = __cpu_to_le32(status);
8179 
8180 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8181 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
8182 		   vdev_id, mac, tid, status);
8183 	return skb;
8184 }
8185 
8186 static struct sk_buff *
8187 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8188 			     u32 tid, u32 initiator, u32 reason)
8189 {
8190 	struct wmi_delba_send_cmd *cmd;
8191 	struct sk_buff *skb;
8192 
8193 	if (!mac)
8194 		return ERR_PTR(-EINVAL);
8195 
8196 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8197 	if (!skb)
8198 		return ERR_PTR(-ENOMEM);
8199 
8200 	cmd = (struct wmi_delba_send_cmd *)skb->data;
8201 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8202 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8203 	cmd->tid = __cpu_to_le32(tid);
8204 	cmd->initiator = __cpu_to_le32(initiator);
8205 	cmd->reasoncode = __cpu_to_le32(reason);
8206 
8207 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8208 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
8209 		   vdev_id, mac, tid, initiator, reason);
8210 	return skb;
8211 }
8212 
8213 static struct sk_buff *
8214 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
8215 {
8216 	struct wmi_pdev_get_tpc_config_cmd *cmd;
8217 	struct sk_buff *skb;
8218 
8219 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8220 	if (!skb)
8221 		return ERR_PTR(-ENOMEM);
8222 
8223 	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
8224 	cmd->param = __cpu_to_le32(param);
8225 
8226 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8227 		   "wmi pdev get tpc config param %d\n", param);
8228 	return skb;
8229 }
8230 
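/* Firmware stats formatting helpers: each appends human readable lines to a
 * buffer of ATH10K_FW_STATS_BUF_SIZE bytes with scnprintf() and advances
 * *length for the caller.
 */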
8231 static void
8232 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8233 				   char *buf, u32 *length)
8234 {
8235 	u32 len = *length;
8236 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8237 
8238 	len += scnprintf(buf + len, buf_len - len, "\n");
8239 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
8240 			"ath10k PDEV stats");
8241 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8242 			"=================");
8243 
8244 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8245 			"Channel noise floor", pdev->ch_noise_floor);
8246 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8247 			"Channel TX power", pdev->chan_tx_power);
8248 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8249 			"TX frame count", pdev->tx_frame_count);
8250 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8251 			"RX frame count", pdev->rx_frame_count);
8252 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8253 			"RX clear count", pdev->rx_clear_count);
8254 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8255 			"Cycle count", pdev->cycle_count);
8256 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8257 			"PHY error count", pdev->phy_err_count);
8258 
8259 	*length = len;
8260 }
8261 
8262 static void
8263 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8264 				    char *buf, u32 *length)
8265 {
8266 	u32 len = *length;
8267 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8268 
8269 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8270 			"RTS bad count", pdev->rts_bad);
8271 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8272 			"RTS good count", pdev->rts_good);
8273 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8274 			"FCS bad count", pdev->fcs_bad);
8275 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8276 			"No beacon count", pdev->no_beacons);
8277 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8278 			"MIB int count", pdev->mib_int_count);
8279 
8280 	len += scnprintf(buf + len, buf_len - len, "\n");
8281 	*length = len;
8282 }
8283 
8284 static void
8285 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8286 				 char *buf, u32 *length)
8287 {
8288 	u32 len = *length;
8289 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8290 
8291 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8292 			 "ath10k PDEV TX stats");
8293 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8294 				 "=================");
8295 
8296 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8297 			 "HTT cookies queued", pdev->comp_queued);
8298 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8299 			 "HTT cookies disp.", pdev->comp_delivered);
8300 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8301 			 "MSDU queued", pdev->msdu_enqued);
8302 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8303 			 "MPDU queued", pdev->mpdu_enqued);
8304 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8305 			 "MSDUs dropped", pdev->wmm_drop);
8306 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8307 			 "Local enqued", pdev->local_enqued);
8308 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8309 			 "Local freed", pdev->local_freed);
8310 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8311 			 "HW queued", pdev->hw_queued);
8312 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8313 			 "PPDUs reaped", pdev->hw_reaped);
8314 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8315 			 "Num underruns", pdev->underrun);
8316 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8317 			 "PPDUs cleaned", pdev->tx_abort);
8318 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8319 			 "MPDUs requeued", pdev->mpdus_requeued);
8320 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8321 			 "Excessive retries", pdev->tx_ko);
8322 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8323 			 "HW rate", pdev->data_rc);
8324 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8325 			 "Sched self triggers", pdev->self_triggers);
8326 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8327 			 "Dropped due to SW retries",
8328 			 pdev->sw_retry_failure);
8329 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8330 			 "Illegal rate phy errors",
8331 			 pdev->illgl_rate_phy_err);
8332 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8333 			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
8334 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8335 			 "TX timeout", pdev->pdev_tx_timeout);
8336 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8337 			 "PDEV resets", pdev->pdev_resets);
8338 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8339 			 "PHY underrun", pdev->phy_underrun);
8340 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8341 			 "MPDU is more than txop limit", pdev->txop_ovf);
8342 	*length = len;
8343 }
8344 
8345 static void
8346 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8347 				 char *buf, u32 *length)
8348 {
8349 	u32 len = *length;
8350 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8351 
8352 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8353 			 "ath10k PDEV RX stats");
8354 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8355 				 "=================");
8356 
8357 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8358 			 "Mid PPDU route change",
8359 			 pdev->mid_ppdu_route_change);
8360 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8361 			 "Tot. number of statuses", pdev->status_rcvd);
8362 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8363 			 "Extra frags on rings 0", pdev->r0_frags);
8364 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8365 			 "Extra frags on rings 1", pdev->r1_frags);
8366 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8367 			 "Extra frags on rings 2", pdev->r2_frags);
8368 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8369 			 "Extra frags on rings 3", pdev->r3_frags);
8370 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8371 			 "MSDUs delivered to HTT", pdev->htt_msdus);
8372 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8373 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
8374 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8375 			 "MSDUs delivered to stack", pdev->loc_msdus);
8376 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8377 			 "MPDUs delivered to stack", pdev->loc_mpdus);
8378 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8379 			 "Oversized AMSDUs", pdev->oversize_amsdu);
8380 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8381 			 "PHY errors", pdev->phy_errs);
8382 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8383 			 "PHY errors drops", pdev->phy_err_drop);
8384 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8385 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8386 	*length = len;
8387 }
8388 
8389 static void
8390 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8391 			      char *buf, u32 *length)
8392 {
8393 	u32 len = *length;
8394 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8395 	int i;
8396 
8397 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8398 			"vdev id", vdev->vdev_id);
8399 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8400 			"beacon snr", vdev->beacon_snr);
8401 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8402 			"data snr", vdev->data_snr);
8403 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8404 			"num rx frames", vdev->num_rx_frames);
8405 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8406 			"num rts fail", vdev->num_rts_fail);
8407 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8408 			"num rts success", vdev->num_rts_success);
8409 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8410 			"num rx err", vdev->num_rx_err);
8411 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8412 			"num rx discard", vdev->num_rx_discard);
8413 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8414 			"num tx not acked", vdev->num_tx_not_acked);
8415 
8416 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8417 		len += scnprintf(buf + len, buf_len - len,
8418 				"%25s [%02d] %u\n",
8419 				"num tx frames", i,
8420 				vdev->num_tx_frames[i]);
8421 
8422 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8423 		len += scnprintf(buf + len, buf_len - len,
8424 				"%25s [%02d] %u\n",
8425 				"num tx frames retries", i,
8426 				vdev->num_tx_frames_retries[i]);
8427 
8428 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8429 		len += scnprintf(buf + len, buf_len - len,
8430 				"%25s [%02d] %u\n",
8431 				"num tx frames failures", i,
8432 				vdev->num_tx_frames_failures[i]);
8433 
8434 	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8435 		len += scnprintf(buf + len, buf_len - len,
8436 				"%25s [%02d] 0x%08x\n",
8437 				"tx rate history", i,
8438 				vdev->tx_rate_history[i]);
8439 
8440 	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8441 		len += scnprintf(buf + len, buf_len - len,
8442 				"%25s [%02d] %u\n",
8443 				"beacon rssi history", i,
8444 				vdev->beacon_rssi_history[i]);
8445 
8446 	len += scnprintf(buf + len, buf_len - len, "\n");
8447 	*length = len;
8448 }
8449 
8450 static void
8451 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8452 			      char *buf, u32 *length, bool extended_peer)
8453 {
8454 	u32 len = *length;
8455 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8456 
8457 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8458 			"Peer MAC address", peer->peer_macaddr);
8459 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8460 			"Peer RSSI", peer->peer_rssi);
8461 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8462 			"Peer TX rate", peer->peer_tx_rate);
8463 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8464 			"Peer RX rate", peer->peer_rx_rate);
8465 	if (!extended_peer)
8466 		len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8467 				"Peer RX duration", peer->rx_duration);
8468 
8469 	len += scnprintf(buf + len, buf_len - len, "\n");
8470 	*length = len;
8471 }
8472 
8473 static void
8474 ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
8475 				   char *buf, u32 *length)
8476 {
8477 	u32 len = *length;
8478 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8479 
8480 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8481 			"Peer MAC address", peer->peer_macaddr);
8482 	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8483 			"Peer RX duration", peer->rx_duration);
8484 }
8485 
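/* The top level fill routines below walk the pdev/vdev/peer lists under
 * ar->data_lock and always NUL terminate the output buffer, truncating the
 * last character if the formatted stats exactly fill it.
 */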
8486 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8487 				      struct ath10k_fw_stats *fw_stats,
8488 				      char *buf)
8489 {
8490 	u32 len = 0;
8491 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8492 	const struct ath10k_fw_stats_pdev *pdev;
8493 	const struct ath10k_fw_stats_vdev *vdev;
8494 	const struct ath10k_fw_stats_peer *peer;
8495 	size_t num_peers;
8496 	size_t num_vdevs;
8497 
8498 	spin_lock_bh(&ar->data_lock);
8499 
8500 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8501 					struct ath10k_fw_stats_pdev, list);
8502 	if (!pdev) {
8503 		ath10k_warn(ar, "failed to get pdev stats\n");
8504 		goto unlock;
8505 	}
8506 
8507 	num_peers = list_count_nodes(&fw_stats->peers);
8508 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8509 
8510 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8511 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8512 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8513 
8514 	len += scnprintf(buf + len, buf_len - len, "\n");
8515 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8516 			 "ath10k VDEV stats", num_vdevs);
8517 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8518 				 "=================");
8519 
8520 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8521 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8522 	}
8523 
8524 	len += scnprintf(buf + len, buf_len - len, "\n");
8525 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8526 			 "ath10k PEER stats", num_peers);
8527 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8528 				 "=================");
8529 
8530 	list_for_each_entry(peer, &fw_stats->peers, list) {
8531 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8532 					      fw_stats->extended);
8533 	}
8534 
8535 unlock:
8536 	spin_unlock_bh(&ar->data_lock);
8537 
8538 	if (len >= buf_len)
8539 		buf[len - 1] = 0;
8540 	else
8541 		buf[len] = 0;
8542 }
8543 
8544 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8545 				     struct ath10k_fw_stats *fw_stats,
8546 				     char *buf)
8547 {
8548 	unsigned int len = 0;
8549 	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8550 	const struct ath10k_fw_stats_pdev *pdev;
8551 	const struct ath10k_fw_stats_vdev *vdev;
8552 	const struct ath10k_fw_stats_peer *peer;
8553 	size_t num_peers;
8554 	size_t num_vdevs;
8555 
8556 	spin_lock_bh(&ar->data_lock);
8557 
8558 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8559 					struct ath10k_fw_stats_pdev, list);
8560 	if (!pdev) {
8561 		ath10k_warn(ar, "failed to get pdev stats\n");
8562 		goto unlock;
8563 	}
8564 
8565 	num_peers = list_count_nodes(&fw_stats->peers);
8566 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8567 
8568 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8569 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8570 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8571 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8572 
8573 	len += scnprintf(buf + len, buf_len - len, "\n");
8574 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8575 			 "ath10k VDEV stats", num_vdevs);
8576 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8577 				 "=================");
8578 
8579 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8580 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8581 	}
8582 
8583 	len += scnprintf(buf + len, buf_len - len, "\n");
8584 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8585 			 "ath10k PEER stats", num_peers);
8586 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8587 				 "=================");
8588 
8589 	list_for_each_entry(peer, &fw_stats->peers, list) {
8590 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8591 					      fw_stats->extended);
8592 	}
8593 
8594 unlock:
8595 	spin_unlock_bh(&ar->data_lock);
8596 
8597 	if (len >= buf_len)
8598 		buf[len - 1] = 0;
8599 	else
8600 		buf[len] = 0;
8601 }
8602 
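/*
 * Build the pdev adaptive CCA command: enable/disable adaptive clear
 * channel assessment with the given detection level and margin.
 */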
8603 static struct sk_buff *
8604 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8605 					   u32 detect_level, u32 detect_margin)
8606 {
8607 	struct wmi_pdev_set_adaptive_cca_params *cmd;
8608 	struct sk_buff *skb;
8609 
8610 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8611 	if (!skb)
8612 		return ERR_PTR(-ENOMEM);
8613 
8614 	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8615 	cmd->enable = __cpu_to_le32(enable);
8616 	cmd->cca_detect_level = __cpu_to_le32(detect_level);
8617 	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8618 
8619 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8620 		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8621 		   enable, detect_level, detect_margin);
8622 	return skb;
8623 }
8624 
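/*
 * Format the 10.4 extended per-vdev stats. FTM counters are printed
 * only when the firmware flags them valid via
 * WMI_VDEV_STATS_FTM_COUNT_VALID.
 */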
8625 static void
8626 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8627 				   char *buf, u32 *length)
8628 {
8629 	u32 len = *length;
8630 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8631 	u32 val;
8632 
8633 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8634 			 "vdev id", vdev->vdev_id);
8635 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8636 			 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8637 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8638 			 "ppdu noack", vdev->ppdu_noack);
8639 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8640 			 "mpdu queued", vdev->mpdu_queued);
8641 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8642 			 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8643 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8644 			 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8645 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8646 			 "mpdu success retry", vdev->mpdu_suc_retry);
8647 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8648 			 "mpdu success multitry", vdev->mpdu_suc_multitry);
8649 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8650 			 "mpdu fail retry", vdev->mpdu_fail_retry);
8651 	val = vdev->tx_ftm_suc;
8652 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8653 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8654 				 "tx ftm success",
8655 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8656 	val = vdev->tx_ftm_suc_retry;
8657 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8658 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8659 				 "tx ftm success retry",
8660 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8661 	val = vdev->tx_ftm_fail;
8662 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8663 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8664 				 "tx ftm fail",
8665 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8666 	val = vdev->rx_ftmr_cnt;
8667 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8668 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8669 				 "rx ftm request count",
8670 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8671 	val = vdev->rx_ftmr_dup_cnt;
8672 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8673 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8674 				 "rx ftm request dup count",
8675 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8676 	val = vdev->rx_iftmr_cnt;
8677 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8678 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8679 				 "rx initial ftm req count",
8680 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8681 	val = vdev->rx_iftmr_dup_cnt;
8682 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8683 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8684 				 "rx initial ftm req dup cnt",
8685 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8686 	len += scnprintf(buf + len, buf_len - len, "\n");
8687 
8688 	*length = len;
8689 }
8690 
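/*
 * 10.4 variant of the firmware stats fill: adds the HW pause, sequence
 * and MPDU counters to the pdev section, uses the extended vdev stats
 * layout and appends extended peer stats when the firmware provided
 * them (fw_stats->extended).
 */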
8691 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8692 				      struct ath10k_fw_stats *fw_stats,
8693 				      char *buf)
8694 {
8695 	u32 len = 0;
8696 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8697 	const struct ath10k_fw_stats_pdev *pdev;
8698 	const struct ath10k_fw_stats_vdev_extd *vdev;
8699 	const struct ath10k_fw_stats_peer *peer;
8700 	const struct ath10k_fw_extd_stats_peer *extd_peer;
8701 	size_t num_peers;
8702 	size_t num_vdevs;
8703 
8704 	spin_lock_bh(&ar->data_lock);
8705 
8706 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8707 					struct ath10k_fw_stats_pdev, list);
8708 	if (!pdev) {
8709 		ath10k_warn(ar, "failed to get pdev stats\n");
8710 		goto unlock;
8711 	}
8712 
8713 	num_peers = list_count_nodes(&fw_stats->peers);
8714 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8715 
8716 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8717 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8718 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8719 
8720 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8721 			"HW paused", pdev->hw_paused);
8722 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8723 			"Seqs posted", pdev->seq_posted);
8724 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8725 			"Seqs failed queueing", pdev->seq_failed_queueing);
8726 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8727 			"Seqs completed", pdev->seq_completed);
8728 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8729 			"Seqs restarted", pdev->seq_restarted);
8730 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8731 			"MU Seqs posted", pdev->mu_seq_posted);
8732 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8733 			"MPDUs SW flushed", pdev->mpdus_sw_flush);
8734 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8735 			"MPDUs HW filtered", pdev->mpdus_hw_filter);
8736 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8737 			"MPDUs truncated", pdev->mpdus_truncated);
8738 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8739 			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
8740 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8741 			"MPDUs expired", pdev->mpdus_expired);
8742 
8743 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8744 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8745 			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
8746 
8747 	len += scnprintf(buf + len, buf_len - len, "\n");
8748 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8749 			"ath10k VDEV stats", num_vdevs);
8750 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8751 				"=================");
8752 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8753 		ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8754 	}
8755 
8756 	len += scnprintf(buf + len, buf_len - len, "\n");
8757 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8758 			"ath10k PEER stats", num_peers);
8759 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8760 				"=================");
8761 
8762 	list_for_each_entry(peer, &fw_stats->peers, list) {
8763 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8764 					      fw_stats->extended);
8765 	}
8766 
8767 	if (fw_stats->extended) {
8768 		list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
8769 			ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
8770 							   &len);
8771 		}
8772 	}
8773 
8774 unlock:
8775 	spin_unlock_bh(&ar->data_lock);
8776 
8777 	if (len >= buf_len)
8778 		buf[len - 1] = 0;
8779 	else
8780 		buf[len] = 0;
8781 }
8782 
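/*
 * Map the generic vdev subtype to the legacy encoding used by the main,
 * 10.1 and 10.2 firmware branches; mesh subtypes are not supported
 * there.
 */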
8783 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8784 				   enum wmi_vdev_subtype subtype)
8785 {
8786 	switch (subtype) {
8787 	case WMI_VDEV_SUBTYPE_NONE:
8788 		return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8789 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8790 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8791 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8792 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8793 	case WMI_VDEV_SUBTYPE_P2P_GO:
8794 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8795 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8796 		return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8797 	case WMI_VDEV_SUBTYPE_MESH_11S:
8798 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8799 		return -EOPNOTSUPP;
8800 	}
8801 	return -EOPNOTSUPP;
8802 }
8803 
8804 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8805 						 enum wmi_vdev_subtype subtype)
8806 {
8807 	switch (subtype) {
8808 	case WMI_VDEV_SUBTYPE_NONE:
8809 		return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8810 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8811 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8812 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8813 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8814 	case WMI_VDEV_SUBTYPE_P2P_GO:
8815 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8816 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8817 		return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8818 	case WMI_VDEV_SUBTYPE_MESH_11S:
8819 		return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8820 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8821 		return -EOPNOTSUPP;
8822 	}
8823 	return -EOPNOTSUPP;
8824 }
8825 
8826 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8827 					       enum wmi_vdev_subtype subtype)
8828 {
8829 	switch (subtype) {
8830 	case WMI_VDEV_SUBTYPE_NONE:
8831 		return WMI_VDEV_SUBTYPE_10_4_NONE;
8832 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8833 		return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8834 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8835 		return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8836 	case WMI_VDEV_SUBTYPE_P2P_GO:
8837 		return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8838 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8839 		return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8840 	case WMI_VDEV_SUBTYPE_MESH_11S:
8841 		return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8842 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8843 		return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8844 	}
8845 	return -EOPNOTSUPP;
8846 }
8847 
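/*
 * Build the 10.4 extended resource config: host platform type, firmware
 * feature bitmap, coex GPIO settings and the TDLS vdev/sleep/buffer STA
 * limits. Sleep STAs are only advertised when the firmware reports
 * WMI_SERVICE_TDLS_UAPSD_SLEEP_STA.
 */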
8848 static struct sk_buff *
8849 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8850 				    enum wmi_host_platform_type type,
8851 				    u32 fw_feature_bitmap)
8852 {
8853 	struct wmi_ext_resource_config_10_4_cmd *cmd;
8854 	struct sk_buff *skb;
8855 	u32 num_tdls_sleep_sta = 0;
8856 
8857 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8858 	if (!skb)
8859 		return ERR_PTR(-ENOMEM);
8860 
8861 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8862 		num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8863 
8864 	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8865 	cmd->host_platform_config = __cpu_to_le32(type);
8866 	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8867 	cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
8868 	cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8869 	cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8870 	cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8871 	cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8872 	cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8873 	cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8874 	cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8875 	cmd->max_tdls_concurrent_buffer_sta =
8876 			__cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8877 
8878 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8879 		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8880 		   type, fw_feature_bitmap);
8881 	return skb;
8882 }
8883 
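/*
 * Build the 10.4 TDLS state update for a vdev. Firmware that only
 * supports explicit TDLS mode is downgraded from active to passive
 * discovery, buffer-STA support is advertised when the service bit is
 * set, and the remaining thresholds/timeouts are fixed driver defaults.
 */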
8884 static struct sk_buff *
8885 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8886 					 enum wmi_tdls_state state)
8887 {
8888 	struct wmi_10_4_tdls_set_state_cmd *cmd;
8889 	struct sk_buff *skb;
8890 	u32 options = 0;
8891 
8892 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8893 	if (!skb)
8894 		return ERR_PTR(-ENOMEM);
8895 
8896 	if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8897 	    state == WMI_TDLS_ENABLE_ACTIVE)
8898 		state = WMI_TDLS_ENABLE_PASSIVE;
8899 
8900 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8901 		options |= WMI_TDLS_BUFFER_STA_EN;
8902 
8903 	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8904 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8905 	cmd->state = __cpu_to_le32(state);
8906 	cmd->notification_interval_ms = __cpu_to_le32(5000);
8907 	cmd->tx_discovery_threshold = __cpu_to_le32(100);
8908 	cmd->tx_teardown_threshold = __cpu_to_le32(5);
8909 	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8910 	cmd->rssi_delta = __cpu_to_le32(-20);
8911 	cmd->tdls_options = __cpu_to_le32(options);
8912 	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8913 	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8914 	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8915 	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8916 	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8917 	cmd->teardown_notification_ms = __cpu_to_le32(10);
8918 	cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8919 
8920 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8921 		   state, vdev_id);
8922 	return skb;
8923 }
8924 
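/*
 * Translate the U-APSD queue bitmap and max service period into the WMI
 * TDLS peer_qos word: one flag per enabled AC plus the SP subfield.
 */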
8925 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8926 {
8927 	u32 peer_qos = 0;
8928 
8929 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8930 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8931 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8932 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8933 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8934 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8935 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8936 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8937 
8938 	peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8939 
8940 	return peer_qos;
8941 }
8942 
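/* Build the 10.4 request for the transmit power control (TPC) table. */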
8943 static struct sk_buff *
8944 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8945 {
8946 	struct wmi_pdev_get_tpc_table_cmd *cmd;
8947 	struct sk_buff *skb;
8948 
8949 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8950 	if (!skb)
8951 		return ERR_PTR(-ENOMEM);
8952 
8953 	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8954 	cmd->param = __cpu_to_le32(param);
8955 
8956 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8957 		   "wmi pdev get tpc table param:%d\n", param);
8958 	return skb;
8959 }
8960 
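/*
 * Build the 10.4 TDLS peer update: peer state, QoS/capability fields
 * and the peer channel list. The command struct already contains room
 * for one channel, so only peer_chan_len - 1 extra wmi_channel entries
 * are allocated.
 */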
8961 static struct sk_buff *
8962 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8963 				     const struct wmi_tdls_peer_update_cmd_arg *arg,
8964 				     const struct wmi_tdls_peer_capab_arg *cap,
8965 				     const struct wmi_channel_arg *chan_arg)
8966 {
8967 	struct wmi_10_4_tdls_peer_update_cmd *cmd;
8968 	struct wmi_tdls_peer_capabilities *peer_cap;
8969 	struct wmi_channel *chan;
8970 	struct sk_buff *skb;
8971 	u32 peer_qos;
8972 	int len, chan_len;
8973 	int i;
8974 
8975 	/* tdls peer update cmd has placeholder for one channel */
8976 	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8977 
8978 	len = sizeof(*cmd) + chan_len * sizeof(*chan);
8979 
8980 	skb = ath10k_wmi_alloc_skb(ar, len);
8981 	if (!skb)
8982 		return ERR_PTR(-ENOMEM);
8983 
8984 	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8985 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8986 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8987 	cmd->peer_state = __cpu_to_le32(arg->peer_state);
8988 
8989 	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8990 					       cap->peer_max_sp);
8991 
8992 	peer_cap = &cmd->peer_capab;
8993 	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8994 	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8995 	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8996 	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8997 	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8998 	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
8999 	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
9000 
9001 	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
9002 		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
9003 
9004 	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
9005 	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
9006 	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
9007 
9008 	for (i = 0; i < cap->peer_chan_len; i++) {
9009 		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
9010 		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
9011 	}
9012 
9013 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9014 		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
9015 		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
9016 	return skb;
9017 }
9018 
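/*
 * Build the 10.4 radar-found command from the detected pulse
 * parameters: PRI, pulse width and sidx bounds.
 */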
9019 static struct sk_buff *
9020 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
9021 				const struct ath10k_radar_found_info *arg)
9022 {
9023 	struct wmi_radar_found_info *cmd;
9024 	struct sk_buff *skb;
9025 
9026 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9027 	if (!skb)
9028 		return ERR_PTR(-ENOMEM);
9029 
9030 	cmd = (struct wmi_radar_found_info *)skb->data;
9031 	cmd->pri_min   = __cpu_to_le32(arg->pri_min);
9032 	cmd->pri_max   = __cpu_to_le32(arg->pri_max);
9033 	cmd->width_min = __cpu_to_le32(arg->width_min);
9034 	cmd->width_max = __cpu_to_le32(arg->width_max);
9035 	cmd->sidx_min  = __cpu_to_le32(arg->sidx_min);
9036 	cmd->sidx_max  = __cpu_to_le32(arg->sidx_max);
9037 
9038 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9039 		   "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
9040 		   arg->pri_min, arg->pri_max, arg->width_min,
9041 		   arg->width_max, arg->sidx_min, arg->sidx_max);
9042 	return skb;
9043 }
9044 
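/*
 * Build the 10.4 per-peer per-TID configuration: ack policy,
 * aggregation and rate control, retry count and RTS/CTS control for
 * the given peer MAC and TID.
 */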
9045 static struct sk_buff *
9046 ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
9047 					 const struct wmi_per_peer_per_tid_cfg_arg *arg)
9048 {
9049 	struct wmi_peer_per_tid_cfg_cmd *cmd;
9050 	struct sk_buff *skb;
9051 
9052 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9053 	if (!skb)
9054 		return ERR_PTR(-ENOMEM);
9055 
9056 	memset(skb->data, 0, sizeof(*cmd));
9057 
9058 	cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
9059 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9060 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
9061 	cmd->tid = cpu_to_le32(arg->tid);
9062 	cmd->ack_policy = cpu_to_le32(arg->ack_policy);
9063 	cmd->aggr_control = cpu_to_le32(arg->aggr_control);
9064 	cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
9065 	cmd->retry_count = cpu_to_le32(arg->retry_count);
9066 	cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
9067 	cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
9068 	cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
9069 
9070 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9071 		   "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
9072 		   arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
9073 		   arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
9074 		   arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
9075 	return skb;
9076 }
9077 
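/*
 * Build a WMI echo command carrying an arbitrary 32-bit value which the
 * firmware returns in the corresponding echo event.
 */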
9078 static struct sk_buff *
9079 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
9080 {
9081 	struct wmi_echo_cmd *cmd;
9082 	struct sk_buff *skb;
9083 
9084 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9085 	if (!skb)
9086 		return ERR_PTR(-ENOMEM);
9087 
9088 	cmd = (struct wmi_echo_cmd *)skb->data;
9089 	cmd->value = cpu_to_le32(value);
9090 
9091 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9092 		   "wmi echo value 0x%08x\n", value);
9093 	return skb;
9094 }
9095 
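/*
 * Flush the WMI command pipeline: post an echo with a magic cookie and
 * wait up to ATH10K_WMI_BARRIER_TIMEOUT_HZ for the firmware to loop it
 * back. Returns 0 on success, -ETIMEDOUT if no echo event arrives, or
 * the error from submitting the echo command.
 *
 * Hypothetical usage sketch (the caller and warning text below are
 * illustrative, not taken from this file):
 *
 *	ret = ath10k_wmi_barrier(ar);
 *	if (ret)
 *		ath10k_warn(ar, "failed to flush wmi commands: %d\n", ret);
 */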
9096 int
9097 ath10k_wmi_barrier(struct ath10k *ar)
9098 {
9099 	int ret;
9100 	int time_left;
9101 
9102 	spin_lock_bh(&ar->data_lock);
9103 	reinit_completion(&ar->wmi.barrier);
9104 	spin_unlock_bh(&ar->data_lock);
9105 
9106 	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
9107 	if (ret) {
9108 		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
9109 		return ret;
9110 	}
9111 
9112 	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
9113 						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
9114 	if (!time_left)
9115 		return -ETIMEDOUT;
9116 
9117 	return 0;
9118 }
9119 
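/*
 * Build the 10.2.4 baseband timing configuration command from the
 * supplied bb_tx_timing and bb_xpa_timing values.
 */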
9120 static struct sk_buff *
9121 ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
9122 				   const struct wmi_bb_timing_cfg_arg *arg)
9123 {
9124 	struct wmi_pdev_bb_timing_cfg_cmd *cmd;
9125 	struct sk_buff *skb;
9126 
9127 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9128 	if (!skb)
9129 		return ERR_PTR(-ENOMEM);
9130 
9131 	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
9132 	cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
9133 	cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
9134 
9135 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9136 		   "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
9137 		   arg->bb_tx_timing, arg->bb_xpa_timing);
9138 	return skb;
9139 }
9140 
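/*
 * Per-branch op tables binding the command generators and event parsers
 * above to each firmware WMI flavour. Hooks left unset (see the "not
 * implemented" markers) are reported as unsupported by the wmi-ops
 * wrappers.
 */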
9141 static const struct wmi_ops wmi_ops = {
9142 	.rx = ath10k_wmi_op_rx,
9143 	.map_svc = wmi_main_svc_map,
9144 
9145 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9146 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9147 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9148 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9149 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9150 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9151 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9152 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9153 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9154 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9155 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
9156 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9157 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9158 
9159 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9160 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9161 	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
9162 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9163 	.gen_init = ath10k_wmi_op_gen_init,
9164 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9165 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9166 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9167 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9168 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9169 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9170 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9171 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9172 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9173 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9174 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9175 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9176 	/* .gen_vdev_wmm_conf not implemented */
9177 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9178 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9179 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9180 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9181 	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
9182 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9183 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9184 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9185 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9186 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9187 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9188 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9189 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9190 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9191 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9192 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9193 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9194 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9195 	/* .gen_pdev_get_temperature not implemented */
9196 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9197 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9198 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9199 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9200 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
9201 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9202 	.gen_echo = ath10k_wmi_op_gen_echo,
9203 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9204 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9205 
9206 	/* .gen_bcn_tmpl not implemented */
9207 	/* .gen_prb_tmpl not implemented */
9208 	/* .gen_p2p_go_bcn_ie not implemented */
9209 	/* .gen_adaptive_qcs not implemented */
9210 	/* .gen_pdev_enable_adaptive_cca not implemented */
9211 };
9212 
9213 static const struct wmi_ops wmi_10_1_ops = {
9214 	.rx = ath10k_wmi_10_1_op_rx,
9215 	.map_svc = wmi_10x_svc_map,
9216 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9217 	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
9218 	.gen_init = ath10k_wmi_10_1_op_gen_init,
9219 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9220 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9221 	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
9222 	/* .gen_pdev_get_temperature not implemented */
9223 
9224 	/* shared with main branch */
9225 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9226 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9227 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9228 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9229 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9230 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9231 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9232 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9233 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9234 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9235 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9236 
9237 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9238 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9239 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9240 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9241 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9242 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9243 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9244 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9245 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9246 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9247 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9248 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9249 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9250 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9251 	/* .gen_vdev_wmm_conf not implemented */
9252 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9253 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9254 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9255 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9256 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9257 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9258 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9259 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9260 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9261 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9262 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9263 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9264 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9265 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9266 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9267 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9268 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9269 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9270 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9271 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9272 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9273 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9274 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9275 	.gen_echo = ath10k_wmi_op_gen_echo,
9276 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9277 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9278 	/* .gen_bcn_tmpl not implemented */
9279 	/* .gen_prb_tmpl not implemented */
9280 	/* .gen_p2p_go_bcn_ie not implemented */
9281 	/* .gen_adaptive_qcs not implemented */
9282 	/* .gen_pdev_enable_adaptive_cca not implemented */
9283 };
9284 
9285 static const struct wmi_ops wmi_10_2_ops = {
9286 	.rx = ath10k_wmi_10_2_op_rx,
9287 	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
9288 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9289 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9290 	/* .gen_pdev_get_temperature not implemented */
9291 
9292 	/* shared with 10.1 */
9293 	.map_svc = wmi_10x_svc_map,
9294 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9295 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9296 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9297 	.gen_echo = ath10k_wmi_op_gen_echo,
9298 
9299 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9300 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9301 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9302 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9303 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9304 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9305 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9306 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9307 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9308 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9309 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9310 
9311 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9312 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9313 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9314 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9315 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9316 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9317 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9318 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9319 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9320 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9321 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9322 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9323 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9324 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9325 	/* .gen_vdev_wmm_conf not implemented */
9326 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9327 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9328 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9329 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9330 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9331 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9332 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9333 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9334 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9335 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9336 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9337 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9338 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9339 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9340 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9341 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9342 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9343 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9344 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9345 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9346 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9347 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9348 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9349 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9350 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9351 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9352 	/* .gen_pdev_enable_adaptive_cca not implemented */
9353 };
9354 
9355 static const struct wmi_ops wmi_10_2_4_ops = {
9356 	.rx = ath10k_wmi_10_2_op_rx,
9357 	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
9358 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9359 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9360 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9361 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9362 
9363 	/* shared with 10.1 */
9364 	.map_svc = wmi_10x_svc_map,
9365 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9366 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9367 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9368 	.gen_echo = ath10k_wmi_op_gen_echo,
9369 
9370 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9371 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9372 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9373 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9374 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9375 	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
9376 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9377 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9378 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9379 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9380 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9381 
9382 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9383 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9384 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9385 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9386 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9387 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9388 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9389 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9390 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9391 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9392 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9393 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9394 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9395 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9396 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9397 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9398 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9399 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9400 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9401 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9402 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9403 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9404 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9405 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9406 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9407 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9408 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9409 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9410 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9411 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9412 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9413 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9414 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9415 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9416 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9417 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9418 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9419 	.gen_pdev_enable_adaptive_cca =
9420 		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
9421 	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
9422 	.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
9423 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9424 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9425 	/* .gen_bcn_tmpl not implemented */
9426 	/* .gen_prb_tmpl not implemented */
9427 	/* .gen_p2p_go_bcn_ie not implemented */
9428 	/* .gen_adaptive_qcs not implemented */
9429 };
9430 
9431 static const struct wmi_ops wmi_10_4_ops = {
9432 	.rx = ath10k_wmi_10_4_op_rx,
9433 	.map_svc = wmi_10_4_svc_map,
9434 
9435 	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
9436 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9437 	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
9438 	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
9439 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9440 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9441 	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
9442 	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
9443 	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
9444 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9445 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9446 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9447 	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
9448 	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
9449 
9450 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9451 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9452 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9453 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9454 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9455 	.gen_init = ath10k_wmi_10_4_op_gen_init,
9456 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9457 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9458 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9459 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9460 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9461 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9462 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9463 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9464 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9465 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9466 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9467 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9468 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9469 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9470 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9471 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9472 	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
9473 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9474 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9475 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9476 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9477 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9478 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9479 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9480 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9481 	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
9482 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9483 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9484 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9485 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9486 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9487 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9488 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9489 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
9490 	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
9491 	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
9492 	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
9493 	.gen_pdev_get_tpc_table_cmdid =
9494 			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
9495 	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
9496 	.gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
9497 
9498 	/* shared with 10.2 */
9499 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9500 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9501 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9502 	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
9503 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9504 	.gen_echo = ath10k_wmi_op_gen_echo,
9505 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9506 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9507 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9508 };
9509 
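/*
 * Bind the op, command and parameter maps matching the firmware's WMI
 * op version, then initialize the WMI completions, workers and, for
 * firmware using management TX by reference, the pending-TX IDR.
 */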
9510 int ath10k_wmi_attach(struct ath10k *ar)
9511 {
9512 	switch (ar->running_fw->fw_file.wmi_op_version) {
9513 	case ATH10K_FW_WMI_OP_VERSION_10_4:
9514 		ar->wmi.ops = &wmi_10_4_ops;
9515 		ar->wmi.cmd = &wmi_10_4_cmd_map;
9516 		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
9517 		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
9518 		ar->wmi.peer_param = &wmi_peer_param_map;
9519 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9520 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9521 		break;
9522 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
9523 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
9524 		ar->wmi.ops = &wmi_10_2_4_ops;
9525 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
9526 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
9527 		ar->wmi.peer_param = &wmi_peer_param_map;
9528 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9529 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9530 		break;
9531 	case ATH10K_FW_WMI_OP_VERSION_10_2:
9532 		ar->wmi.cmd = &wmi_10_2_cmd_map;
9533 		ar->wmi.ops = &wmi_10_2_ops;
9534 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9535 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9536 		ar->wmi.peer_param = &wmi_peer_param_map;
9537 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9538 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9539 		break;
9540 	case ATH10K_FW_WMI_OP_VERSION_10_1:
9541 		ar->wmi.cmd = &wmi_10x_cmd_map;
9542 		ar->wmi.ops = &wmi_10_1_ops;
9543 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9544 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9545 		ar->wmi.peer_param = &wmi_peer_param_map;
9546 		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
9547 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9548 		break;
9549 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
9550 		ar->wmi.cmd = &wmi_cmd_map;
9551 		ar->wmi.ops = &wmi_ops;
9552 		ar->wmi.vdev_param = &wmi_vdev_param_map;
9553 		ar->wmi.pdev_param = &wmi_pdev_param_map;
9554 		ar->wmi.peer_param = &wmi_peer_param_map;
9555 		ar->wmi.peer_flags = &wmi_peer_flags_map;
9556 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9557 		break;
9558 	case ATH10K_FW_WMI_OP_VERSION_TLV:
9559 		ath10k_wmi_tlv_attach(ar);
9560 		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
9561 		break;
9562 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
9563 	case ATH10K_FW_WMI_OP_VERSION_MAX:
9564 		ath10k_err(ar, "unsupported WMI op version: %d\n",
9565 			   ar->running_fw->fw_file.wmi_op_version);
9566 		return -EINVAL;
9567 	}
9568 
9569 	init_completion(&ar->wmi.service_ready);
9570 	init_completion(&ar->wmi.unified_ready);
9571 	init_completion(&ar->wmi.barrier);
9572 	init_completion(&ar->wmi.radar_confirm);
9573 
9574 	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
9575 	INIT_WORK(&ar->radar_confirmation_work,
9576 		  ath10k_radar_confirmation_work);
9577 
9578 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9579 		     ar->running_fw->fw_file.fw_features)) {
9580 		idr_init(&ar->wmi.mgmt_pending_tx);
9581 	}
9582 
9583 	return 0;
9584 }
9585 
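/* Free the host memory chunks that were allocated for the firmware. */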
9586 void ath10k_wmi_free_host_mem(struct ath10k *ar)
9587 {
9588 	int i;
9589 
9590 	/* free the host memory chunks requested by firmware */
9591 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
9592 		dma_free_coherent(ar->dev,
9593 				  ar->wmi.mem_chunks[i].len,
9594 				  ar->wmi.mem_chunks[i].vaddr,
9595 				  ar->wmi.mem_chunks[i].paddr);
9596 	}
9597 
9598 	ar->wmi.num_mem_chunks = 0;
9599 }
9600 
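/*
 * idr_for_each() callback: unmap and free a management frame that was
 * still awaiting a TX completion when WMI is torn down.
 */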
9601 static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
9602 					       void *ctx)
9603 {
9604 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
9605 	struct ath10k *ar = ctx;
9606 	struct sk_buff *msdu;
9607 
9608 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9609 		   "force cleanup mgmt msdu_id %u\n", msdu_id);
9610 
9611 	msdu = pkt_addr->vaddr;
9612 	dma_unmap_single(ar->dev, pkt_addr->paddr,
9613 			 msdu->len, DMA_TO_DEVICE);
9614 	ieee80211_free_txskb(ar->hw, msdu);
9615 
9616 	return 0;
9617 }
9618 
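/*
 * Tear down WMI state: drain any pending management TX descriptors when
 * TX by reference is in use, cancel the service-ready worker and free a
 * cached service-ready event skb.
 */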
9619 void ath10k_wmi_detach(struct ath10k *ar)
9620 {
9621 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9622 		     ar->running_fw->fw_file.fw_features)) {
9623 		spin_lock_bh(&ar->data_lock);
9624 		idr_for_each(&ar->wmi.mgmt_pending_tx,
9625 			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
9626 		idr_destroy(&ar->wmi.mgmt_pending_tx);
9627 		spin_unlock_bh(&ar->data_lock);
9628 	}
9629 
9630 	cancel_work_sync(&ar->svc_rdy_work);
9631 	dev_kfree_skb(ar->svc_rdy_skb);
9632 }
9633