// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

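/*
 * Each supported firmware branch (main, 10.X, 10.2.4, 10.4) numbers its WMI
 * command IDs differently. The wmi_cmd_map tables below translate the
 * driver's abstract command indices into the per-branch IDs; commands that a
 * given branch does not implement are mapped to WMI_CMD_UNSUPPORTED.
 */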
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
		WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
		WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
		WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
		WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
		WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
		WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
		WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
		WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
		WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};

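/* MAIN WMI peer param map */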
static struct wmi_peer_param_map wmi_peer_param_map = {
	.smps_state = WMI_PEER_SMPS_STATE,
	.ampdu = WMI_PEER_AMPDU,
	.authorize = WMI_PEER_AUTHORIZE,
	.chan_width = WMI_PEER_CHAN_WIDTH,
	.nss = WMI_PEER_NSS,
	.use_4addr = WMI_PEER_USE_4ADDR,
	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
	.debug = WMI_PEER_DEBUG,
	.phymode = WMI_PEER_PHYMODE,
	.dummy_var = WMI_PEER_DUMMY_VAR,
};

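/*
 * The VDEV parameter maps follow the same pattern as the command maps above:
 * parameters that a given firmware branch does not implement are mapped to
 * WMI_VDEV_PARAM_UNSUPPORTED.
 */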
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.2.4 WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.4 WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
		WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};

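/* MAIN WMI PDEV param map */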
static struct wmi_pdev_param_map wmi_pdev_param_map = {
	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1138 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1139 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1140 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1141 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1142 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1143 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1144 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1145 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1146 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1147 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1148 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1149 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1150 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1151 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1152 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1153 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1154 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1155 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1156 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1157 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1158 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1159 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1160 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1161 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1162 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1163 };
1164
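/* 10.X firmware pdev param map; parameters the 10.X branch does not
 * expose are marked WMI_PDEV_PARAM_UNSUPPORTED.
 */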
1165 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1166 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1167 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1168 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1169 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1170 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1171 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1172 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1173 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1174 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1175 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1176 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1177 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1178 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1179 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1180 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1181 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1182 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1183 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1184 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1185 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1186 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1187 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1188 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1189 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1190 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1191 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1192 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1193 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1194 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1195 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1196 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1197 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1198 .bcnflt_stats_update_period =
1199 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1200 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1201 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1202 .dcs = WMI_10X_PDEV_PARAM_DCS,
1203 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1204 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1205 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1206 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1207 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1208 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1209 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1210 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1211 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1212 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1213 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1214 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1215 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1216 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1217 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1218 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1219 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1220 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1221 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1222 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1223 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1224 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1225 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1226 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1227 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1228 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1229 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1230 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1231 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1232 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1233 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1234 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1235 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1236 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1237 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1238 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1239 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1240 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1241 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1242 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1243 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1244 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1245 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1246 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1247 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1248 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1249 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1250 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1251 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1252 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1253 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1254 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1255 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1256 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1257 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1258 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1259 };
1260
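/* firmware 10.2.4 specific pdev param mappings */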
1261 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1262 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1263 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1264 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1265 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1266 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1267 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1268 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1269 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1270 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1271 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1272 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1273 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1274 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1275 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1276 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1277 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1278 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1279 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1280 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1281 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1282 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1283 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1284 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1285 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1286 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1287 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1288 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1289 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1290 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1291 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1292 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1293 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1294 .bcnflt_stats_update_period =
1295 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1296 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1297 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1298 .dcs = WMI_10X_PDEV_PARAM_DCS,
1299 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1300 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1301 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1302 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1303 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1304 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1305 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1306 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1307 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1308 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1309 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1310 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1311 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1312 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1313 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1314 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1315 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1316 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1317 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1318 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1319 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1320 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1321 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1322 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1323 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1324 .peer_sta_ps_statechg_enable =
1325 WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1326 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1327 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1328 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1329 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1330 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1331 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1332 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1333 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1334 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1335 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1336 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1337 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1338 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1339 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1340 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1341 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1342 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1343 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1344 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1345 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1346 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1347 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1348 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1349 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1350 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1351 .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1352 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1353 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1354 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1355 .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1356 };
1357
1358 /* firmware 10.2 specific mappings */
1359 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1360 .init_cmdid = WMI_10_2_INIT_CMDID,
1361 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1362 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1363 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1364 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1365 .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1366 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1367 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1368 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1369 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1370 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1371 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1372 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1373 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1374 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1375 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1376 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1377 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1378 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1379 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1380 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1381 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1382 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1383 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1384 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1385 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1386 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1387 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1388 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1389 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1390 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1391 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1392 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1393 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1394 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1395 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1396 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1397 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1398 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1399 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1400 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1401 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1402 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1403 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1404 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1405 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1406 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1407 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1408 .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1409 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1410 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1411 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1412 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1413 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1414 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1415 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1416 .roam_scan_rssi_change_threshold =
1417 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1418 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1419 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1420 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1421 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1422 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1423 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1424 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1425 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1426 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1427 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1428 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1429 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1430 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1431 .wlan_profile_set_hist_intvl_cmdid =
1432 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1433 .wlan_profile_get_profile_data_cmdid =
1434 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1435 .wlan_profile_enable_profile_id_cmdid =
1436 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1437 .wlan_profile_list_profile_id_cmdid =
1438 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1439 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1440 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1441 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1442 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1443 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1444 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1445 .wow_enable_disable_wake_event_cmdid =
1446 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1447 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1448 .wow_hostwakeup_from_sleep_cmdid =
1449 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1450 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1451 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1452 .vdev_spectral_scan_configure_cmdid =
1453 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1454 .vdev_spectral_scan_enable_cmdid =
1455 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1456 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1457 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1458 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1459 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1460 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1461 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1462 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1463 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1464 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1465 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1466 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1467 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1468 .echo_cmdid = WMI_10_2_ECHO_CMDID,
1469 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1470 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1471 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1472 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1473 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1474 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1475 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1476 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1477 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1478 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1479 .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1480 .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1481 .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1482 .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1483 .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1484 .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1485 .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1486 .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1487 .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1488 .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1489 .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1490 .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1491 .nan_cmdid = WMI_CMD_UNSUPPORTED,
1492 .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1493 .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1494 .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1495 .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1496 .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1497 .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1498 .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1499 .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1500 .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1501 .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1502 .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1503 .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1504 .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1505 .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1506 .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1507 .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1508 .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1509 .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1510 .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1511 .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1512 .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1513 .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1514 };
1515
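/* firmware 10.4 specific pdev param mappings */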
1516 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1517 .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1518 .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1519 .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1520 .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1521 .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1522 .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1523 .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1524 .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1525 .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1526 .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1527 .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1528 .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1529 .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1530 .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1531 .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1532 .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1533 .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1534 .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1535 .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1536 .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1537 .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1538 .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1539 .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1540 .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1541 .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1542 .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1543 .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1544 .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1545 .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1546 .pdev_stats_update_period =
1547 WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1548 .vdev_stats_update_period =
1549 WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1550 .peer_stats_update_period =
1551 WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1552 .bcnflt_stats_update_period =
1553 WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1554 .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1555 .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1556 .dcs = WMI_10_4_PDEV_PARAM_DCS,
1557 .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1558 .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1559 .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1560 .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1561 .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1562 .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1563 .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1564 .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1565 .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1566 .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1567 .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1568 .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1569 .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1570 .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1571 .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1572 .smart_antenna_default_antenna =
1573 WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1574 .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1575 .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1576 .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1577 .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1578 .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1579 .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1580 .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1581 .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1582 .remove_mcast2ucast_buffer =
1583 WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1584 .peer_sta_ps_statechg_enable =
1585 WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1586 .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1587 .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1588 .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1589 .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1590 .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1591 .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1592 .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1593 .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1594 .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1595 .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1596 .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1597 .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1598 .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1599 .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1600 .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1601 .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1602 .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1603 .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1604 .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1605 .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1606 .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1607 .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1608 .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1609 .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1610 .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1611 .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1612 .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1613 .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1614 .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1615 .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1616 };
1617
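/* Cipher suite mapping for non-TLV firmware; an identity mapping of the
 * driver's WMI cipher enum, kept alongside the TLV table below.
 */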
1618 static const u8 wmi_key_cipher_suites[] = {
1619 [WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1620 [WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1621 [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1622 [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1623 [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1624 [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1625 [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1626 [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1627 [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1628 };
1629
1630 static const u8 wmi_tlv_key_cipher_suites[] = {
1631 [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1632 [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1633 [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1634 [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1635 [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1636 [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1637 [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1638 [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1639 [WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1640 };
1641
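/* Peer flag bit definitions differ between firmware branches; these maps
 * translate the driver's generic peer flags into the per-branch values.
 */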
1642 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1643 .auth = WMI_PEER_AUTH,
1644 .qos = WMI_PEER_QOS,
1645 .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1646 .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1647 .apsd = WMI_PEER_APSD,
1648 .ht = WMI_PEER_HT,
1649 .bw40 = WMI_PEER_40MHZ,
1650 .stbc = WMI_PEER_STBC,
1651 .ldbc = WMI_PEER_LDPC,
1652 .dyn_mimops = WMI_PEER_DYN_MIMOPS,
1653 .static_mimops = WMI_PEER_STATIC_MIMOPS,
1654 .spatial_mux = WMI_PEER_SPATIAL_MUX,
1655 .vht = WMI_PEER_VHT,
1656 .bw80 = WMI_PEER_80MHZ,
1657 .vht_2g = WMI_PEER_VHT_2G,
1658 .pmf = WMI_PEER_PMF,
1659 .bw160 = WMI_PEER_160MHZ,
1660 };
1661
1662 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1663 .auth = WMI_10X_PEER_AUTH,
1664 .qos = WMI_10X_PEER_QOS,
1665 .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1666 .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1667 .apsd = WMI_10X_PEER_APSD,
1668 .ht = WMI_10X_PEER_HT,
1669 .bw40 = WMI_10X_PEER_40MHZ,
1670 .stbc = WMI_10X_PEER_STBC,
1671 .ldbc = WMI_10X_PEER_LDPC,
1672 .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1673 .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1674 .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1675 .vht = WMI_10X_PEER_VHT,
1676 .bw80 = WMI_10X_PEER_80MHZ,
1677 .bw160 = WMI_10X_PEER_160MHZ,
1678 };
1679
1680 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1681 .auth = WMI_10_2_PEER_AUTH,
1682 .qos = WMI_10_2_PEER_QOS,
1683 .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1684 .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1685 .apsd = WMI_10_2_PEER_APSD,
1686 .ht = WMI_10_2_PEER_HT,
1687 .bw40 = WMI_10_2_PEER_40MHZ,
1688 .stbc = WMI_10_2_PEER_STBC,
1689 .ldbc = WMI_10_2_PEER_LDPC,
1690 .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1691 .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1692 .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1693 .vht = WMI_10_2_PEER_VHT,
1694 .bw80 = WMI_10_2_PEER_80MHZ,
1695 .vht_2g = WMI_10_2_PEER_VHT_2G,
1696 .pmf = WMI_10_2_PEER_PMF,
1697 .bw160 = WMI_10_2_PEER_160MHZ,
1698 };
1699
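/* Fill a wmi_channel from the driver's channel argument, deriving the
 * segment center frequencies for VHT80+80/VHT160 and flagging DFS on the
 * secondary segment when it falls on a radar channel.
 */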
1700 void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
1701 const struct wmi_channel_arg *arg)
1702 {
1703 u32 flags = 0;
1704 struct ieee80211_channel *chan = NULL;
1705
1706 memset(ch, 0, sizeof(*ch));
1707
1708 if (arg->passive)
1709 flags |= WMI_CHAN_FLAG_PASSIVE;
1710 if (arg->allow_ibss)
1711 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1712 if (arg->allow_ht)
1713 flags |= WMI_CHAN_FLAG_ALLOW_HT;
1714 if (arg->allow_vht)
1715 flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1716 if (arg->ht40plus)
1717 flags |= WMI_CHAN_FLAG_HT40_PLUS;
1718 if (arg->chan_radar)
1719 flags |= WMI_CHAN_FLAG_DFS;
1720
1721 ch->band_center_freq2 = 0;
1722 ch->mhz = __cpu_to_le32(arg->freq);
1723 ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1724 if (arg->mode == MODE_11AC_VHT80_80) {
1725 ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1726 chan = ieee80211_get_channel(ar->hw->wiphy,
1727 arg->band_center_freq2 - 10);
1728 }
1729
1730 if (arg->mode == MODE_11AC_VHT160) {
1731 u32 band_center_freq1;
1732 u32 band_center_freq2;
1733
1734 if (arg->freq > arg->band_center_freq1) {
1735 band_center_freq1 = arg->band_center_freq1 + 40;
1736 band_center_freq2 = arg->band_center_freq1 - 40;
1737 } else {
1738 band_center_freq1 = arg->band_center_freq1 - 40;
1739 band_center_freq2 = arg->band_center_freq1 + 40;
1740 }
1741
1742 ch->band_center_freq1 =
1743 __cpu_to_le32(band_center_freq1);
1744 /* Minus 10 to get a defined 5 GHz channel frequency */
1745 chan = ieee80211_get_channel(ar->hw->wiphy,
1746 band_center_freq2 - 10);
1747 /* The center frequency of the entire VHT160 */
1748 ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
1749 }
1750
1751 if (chan && chan->flags & IEEE80211_CHAN_RADAR)
1752 flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
1753
1754 ch->min_power = arg->min_power;
1755 ch->max_power = arg->max_power;
1756 ch->reg_power = arg->max_reg_power;
1757 ch->antenna_max = arg->max_antenna_gain;
1758 ch->max_tx_power = arg->max_power;
1759
1760 /* mode & flags share storage */
1761 ch->mode = arg->mode;
1762 ch->flags |= __cpu_to_le32(flags);
1763 }
1764
1765 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1766 {
1767 unsigned long time_left, i;
1768
1769 time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1770 WMI_SERVICE_READY_TIMEOUT_HZ);
1771 if (!time_left) {
1772 /* Sometimes the PCI HIF doesn't receive an interrupt
1773 * for the service ready message even if the buffer
1774 * was completed. A PCIe sniffer shows that it's
1775 * because the corresponding CE ring doesn't fire
1776 * it. Work around this by polling the CE rings once.
1777 */
1778 ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1779
1780 for (i = 0; i < CE_COUNT; i++)
1781 ath10k_hif_send_complete_check(ar, i, 1);
1782
1783 time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1784 WMI_SERVICE_READY_TIMEOUT_HZ);
1785 if (!time_left) {
1786 ath10k_warn(ar, "polling timed out\n");
1787 return -ETIMEDOUT;
1788 }
1789
1790 ath10k_warn(ar, "service ready completion received, continuing normally\n");
1791 }
1792
1793 return 0;
1794 }
1795
1796 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1797 {
1798 unsigned long time_left;
1799
1800 time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1801 WMI_UNIFIED_READY_TIMEOUT_HZ);
1802 if (!time_left)
1803 return -ETIMEDOUT;
1804 return 0;
1805 }
1806
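/* Allocate a zeroed, 4-byte aligned skb for a WMI command, reserving
 * headroom for the headers pushed later in the TX path.
 */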
1807 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1808 {
1809 struct sk_buff *skb;
1810 u32 round_len = roundup(len, 4);
1811
1812 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1813 if (!skb)
1814 return NULL;
1815
1816 skb_reserve(skb, WMI_SKB_HEADROOM);
1817 if (!IS_ALIGNED((unsigned long)skb->data, 4))
1818 ath10k_warn(ar, "Unaligned WMI skb\n");
1819
1820 skb_put(skb, round_len);
1821 memset(skb->data, 0, round_len);
1822
1823 return skb;
1824 }
1825
1826 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1827 {
1828 dev_kfree_skb(skb);
1829 }
1830
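/* Prepend the WMI command header and hand the skb to HTC without waiting
 * for TX credits; on failure the header is popped again so the caller may
 * retry with the same skb.
 */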
1831 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1832 u32 cmd_id)
1833 {
1834 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1835 struct wmi_cmd_hdr *cmd_hdr;
1836 int ret;
1837 u32 cmd = 0;
1838
1839 if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1840 return -ENOMEM;
1841
1842 cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1843
1844 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1845 cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1846
1847 memset(skb_cb, 0, sizeof(*skb_cb));
1848 trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1849 ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1850
1851 if (ret)
1852 goto err_pull;
1853
1854 return 0;
1855
1856 err_pull:
1857 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1858 return ret;
1859 }
1860
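/* Send a scheduled beacon by reference without sleeping. The beacon state
 * machine prevents re-sending a beacon that is already in flight.
 */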
1861 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1862 {
1863 struct ath10k *ar = arvif->ar;
1864 struct ath10k_skb_cb *cb;
1865 struct sk_buff *bcn;
1866 bool dtim_zero;
1867 bool deliver_cab;
1868 int ret;
1869
1870 spin_lock_bh(&ar->data_lock);
1871
1872 bcn = arvif->beacon;
1873
1874 if (!bcn)
1875 goto unlock;
1876
1877 cb = ATH10K_SKB_CB(bcn);
1878
1879 switch (arvif->beacon_state) {
1880 case ATH10K_BEACON_SENDING:
1881 case ATH10K_BEACON_SENT:
1882 break;
1883 case ATH10K_BEACON_SCHEDULED:
1884 arvif->beacon_state = ATH10K_BEACON_SENDING;
1885 spin_unlock_bh(&ar->data_lock);
1886
1887 dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1888 deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1889 ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1890 arvif->vdev_id,
1891 bcn->data, bcn->len,
1892 cb->paddr,
1893 dtim_zero,
1894 deliver_cab);
1895
1896 spin_lock_bh(&ar->data_lock);
1897
1898 if (ret == 0)
1899 arvif->beacon_state = ATH10K_BEACON_SENT;
1900 else
1901 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1902 }
1903
1904 unlock:
1905 spin_unlock_bh(&ar->data_lock);
1906 }
1907
1908 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1909 struct ieee80211_vif *vif)
1910 {
1911 struct ath10k_vif *arvif = (void *)vif->drv_priv;
1912
1913 ath10k_wmi_tx_beacon_nowait(arvif);
1914 }
1915
1916 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1917 {
1918 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1919 ATH10K_ITER_NORMAL_FLAGS,
1920 ath10k_wmi_tx_beacons_iter,
1921 NULL);
1922 }
1923
1924 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1925 {
1926 /* try to send pending beacons first. they take priority */
1927 ath10k_wmi_tx_beacons_nowait(ar);
1928
1929 wake_up(&ar->wmi.tx_credits_wq);
1930 }
1931
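/* Blocking send: waits up to 3 seconds for HTC TX credits, retrying while
 * the non-blocking send returns -EAGAIN, and frees the skb on failure. A
 * persistent timeout triggers hardware recovery.
 *
 * Illustrative usage (command IDs come from the per-firmware cmd map):
 *
 *	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
 *	if (!skb)
 *		return -ENOMEM;
 *	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
 *	...
 *	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
 */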
1932 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1933 {
1934 int ret = -EOPNOTSUPP;
1935
1936 might_sleep();
1937
1938 if (cmd_id == WMI_CMD_UNSUPPORTED) {
1939 ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1940 cmd_id);
1941 return ret;
1942 }
1943
1944 wait_event_timeout(ar->wmi.tx_credits_wq, ({
1945 if (ar->state == ATH10K_STATE_WEDGED) {
1946 ret = -ESHUTDOWN;
1947 ath10k_dbg(ar, ATH10K_DBG_WMI,
1948 "drop wmi command %d, hardware is wedged\n", cmd_id);
1949 }
1950 /* try to send pending beacons first. they take priority */
1951 ath10k_wmi_tx_beacons_nowait(ar);
1952
1953 ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1954
1955 if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1956 ret = -ESHUTDOWN;
1957
1958 (ret != -EAGAIN);
1959 }), 3 * HZ);
1960
1961 if (ret)
1962 dev_kfree_skb_any(skb);
1963
1964 if (ret == -EAGAIN) {
1965 ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1966 cmd_id);
1967 ath10k_core_start_recovery(ar);
1968 }
1969
1970 return ret;
1971 }
1972
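/* Build a WMI management-frame TX command from a mac80211 msdu. For
 * protected robust management frames the reported buffer length is
 * enlarged to cover the MMIE/MIC appended during encryption.
 */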
1973 static struct sk_buff *
1974 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1975 {
1976 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1977 struct ath10k_vif *arvif;
1978 struct wmi_mgmt_tx_cmd *cmd;
1979 struct ieee80211_hdr *hdr;
1980 struct sk_buff *skb;
1981 int len;
1982 u32 vdev_id;
1983 u32 buf_len = msdu->len;
1984 u16 fc;
1985 const u8 *peer_addr;
1986
1987 hdr = (struct ieee80211_hdr *)msdu->data;
1988 fc = le16_to_cpu(hdr->frame_control);
1989
1990 if (cb->vif) {
1991 arvif = (void *)cb->vif->drv_priv;
1992 vdev_id = arvif->vdev_id;
1993 } else {
1994 vdev_id = 0;
1995 }
1996
1997 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1998 return ERR_PTR(-EINVAL);
1999
2000 len = sizeof(cmd->hdr) + msdu->len;
2001
2002 if ((ieee80211_is_action(hdr->frame_control) ||
2003 ieee80211_is_deauth(hdr->frame_control) ||
2004 ieee80211_is_disassoc(hdr->frame_control)) &&
2005 ieee80211_has_protected(hdr->frame_control)) {
2006 peer_addr = hdr->addr1;
2007 if (is_multicast_ether_addr(peer_addr)) {
2008 len += sizeof(struct ieee80211_mmie_16);
2009 buf_len += sizeof(struct ieee80211_mmie_16);
2010 } else {
2011 if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
2012 cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
2013 len += IEEE80211_GCMP_MIC_LEN;
2014 buf_len += IEEE80211_GCMP_MIC_LEN;
2015 } else {
2016 len += IEEE80211_CCMP_MIC_LEN;
2017 buf_len += IEEE80211_CCMP_MIC_LEN;
2018 }
2019 }
2020 }
2021
2022 len = round_up(len, 4);
2023
2024 skb = ath10k_wmi_alloc_skb(ar, len);
2025 if (!skb)
2026 return ERR_PTR(-ENOMEM);
2027
2028 cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
2029
2030 cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
2031 cmd->hdr.tx_rate = 0;
2032 cmd->hdr.tx_power = 0;
2033 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
2034
2035 ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
2036 memcpy(cmd->buf, msdu->data, msdu->len);
2037
2038 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
2039 msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
2040 fc & IEEE80211_FCTL_STYPE);
2041 trace_ath10k_tx_hdr(ar, skb->data, skb->len);
2042 trace_ath10k_tx_payload(ar, skb->data, skb->len);
2043
2044 return skb;
2045 }
2046
2047 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
2048 {
2049 lockdep_assert_held(&ar->data_lock);
2050
2051 switch (ar->scan.state) {
2052 case ATH10K_SCAN_IDLE:
2053 case ATH10K_SCAN_RUNNING:
2054 case ATH10K_SCAN_ABORTING:
2055 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
2056 ath10k_scan_state_str(ar->scan.state),
2057 ar->scan.state);
2058 break;
2059 case ATH10K_SCAN_STARTING:
2060 ar->scan.state = ATH10K_SCAN_RUNNING;
2061
2062 if (ar->scan.is_roc)
2063 ieee80211_ready_on_channel(ar->hw);
2064
2065 complete(&ar->scan.started);
2066 break;
2067 }
2068 }
2069
2070 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2071 {
2072 lockdep_assert_held(&ar->data_lock);
2073
2074 switch (ar->scan.state) {
2075 case ATH10K_SCAN_IDLE:
2076 case ATH10K_SCAN_RUNNING:
2077 case ATH10K_SCAN_ABORTING:
2078 ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2079 ath10k_scan_state_str(ar->scan.state),
2080 ar->scan.state);
2081 break;
2082 case ATH10K_SCAN_STARTING:
2083 complete(&ar->scan.started);
2084 __ath10k_scan_finish(ar);
2085 break;
2086 }
2087 }
2088
2089 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2090 {
2091 lockdep_assert_held(&ar->data_lock);
2092
2093 switch (ar->scan.state) {
2094 case ATH10K_SCAN_IDLE:
2095 case ATH10K_SCAN_STARTING:
2096 /* One suspected reason scan can be completed while starting is
2097 * if firmware fails to deliver all scan events to the host,
2098 * e.g. when transport pipe is full. This has been observed
2099 * with spectral scan phyerr events starving wmi transport
2100 * pipe. In such a case the "scan completed" event should be (and
2101 * is) ignored by the host as it may just be the firmware's scan
2102 * state machine recovering.
2103 */
2104 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2105 ath10k_scan_state_str(ar->scan.state),
2106 ar->scan.state);
2107 break;
2108 case ATH10K_SCAN_RUNNING:
2109 case ATH10K_SCAN_ABORTING:
2110 __ath10k_scan_finish(ar);
2111 break;
2112 }
2113 }
2114
2115 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2116 {
2117 lockdep_assert_held(&ar->data_lock);
2118
2119 switch (ar->scan.state) {
2120 case ATH10K_SCAN_IDLE:
2121 case ATH10K_SCAN_STARTING:
2122 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2123 ath10k_scan_state_str(ar->scan.state),
2124 ar->scan.state);
2125 break;
2126 case ATH10K_SCAN_RUNNING:
2127 case ATH10K_SCAN_ABORTING:
2128 ar->scan_channel = NULL;
2129 break;
2130 }
2131 }
2132
2133 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2134 {
2135 lockdep_assert_held(&ar->data_lock);
2136
2137 switch (ar->scan.state) {
2138 case ATH10K_SCAN_IDLE:
2139 case ATH10K_SCAN_STARTING:
2140 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2141 ath10k_scan_state_str(ar->scan.state),
2142 ar->scan.state);
2143 break;
2144 case ATH10K_SCAN_RUNNING:
2145 case ATH10K_SCAN_ABORTING:
2146 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2147
2148 if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2149 complete(&ar->scan.on_channel);
2150 break;
2151 }
2152 }
2153
2154 static const char *
2155 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2156 enum wmi_scan_completion_reason reason)
2157 {
2158 switch (type) {
2159 case WMI_SCAN_EVENT_STARTED:
2160 return "started";
2161 case WMI_SCAN_EVENT_COMPLETED:
2162 switch (reason) {
2163 case WMI_SCAN_REASON_COMPLETED:
2164 return "completed";
2165 case WMI_SCAN_REASON_CANCELLED:
2166 return "completed [cancelled]";
2167 case WMI_SCAN_REASON_PREEMPTED:
2168 return "completed [preempted]";
2169 case WMI_SCAN_REASON_TIMEDOUT:
2170 return "completed [timedout]";
2171 case WMI_SCAN_REASON_INTERNAL_FAILURE:
2172 return "completed [internal err]";
2173 case WMI_SCAN_REASON_MAX:
2174 break;
2175 }
2176 return "completed [unknown]";
2177 case WMI_SCAN_EVENT_BSS_CHANNEL:
2178 return "bss channel";
2179 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2180 return "foreign channel";
2181 case WMI_SCAN_EVENT_DEQUEUED:
2182 return "dequeued";
2183 case WMI_SCAN_EVENT_PREEMPTED:
2184 return "preempted";
2185 case WMI_SCAN_EVENT_START_FAILED:
2186 return "start failed";
2187 case WMI_SCAN_EVENT_RESTARTED:
2188 return "restarted";
2189 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2190 return "foreign channel exit";
2191 default:
2192 return "unknown";
2193 }
2194 }
2195
2196 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2197 struct wmi_scan_ev_arg *arg)
2198 {
2199 struct wmi_scan_event *ev = (void *)skb->data;
2200
2201 if (skb->len < sizeof(*ev))
2202 return -EPROTO;
2203
2204 skb_pull(skb, sizeof(*ev));
2205 arg->event_type = ev->event_type;
2206 arg->reason = ev->reason;
2207 arg->channel_freq = ev->channel_freq;
2208 arg->scan_req_id = ev->scan_req_id;
2209 arg->scan_id = ev->scan_id;
2210 arg->vdev_id = ev->vdev_id;
2211
2212 return 0;
2213 }
2214
2215 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2216 {
2217 struct wmi_scan_ev_arg arg = {};
2218 enum wmi_scan_event_type event_type;
2219 enum wmi_scan_completion_reason reason;
2220 u32 freq;
2221 u32 req_id;
2222 u32 scan_id;
2223 u32 vdev_id;
2224 int ret;
2225
2226 ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2227 if (ret) {
2228 ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2229 return ret;
2230 }
2231
2232 event_type = __le32_to_cpu(arg.event_type);
2233 reason = __le32_to_cpu(arg.reason);
2234 freq = __le32_to_cpu(arg.channel_freq);
2235 req_id = __le32_to_cpu(arg.scan_req_id);
2236 scan_id = __le32_to_cpu(arg.scan_id);
2237 vdev_id = __le32_to_cpu(arg.vdev_id);
2238
2239 spin_lock_bh(&ar->data_lock);
2240
2241 ath10k_dbg(ar, ATH10K_DBG_WMI,
2242 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2243 ath10k_wmi_event_scan_type_str(event_type, reason),
2244 event_type, reason, freq, req_id, scan_id, vdev_id,
2245 ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2246
2247 switch (event_type) {
2248 case WMI_SCAN_EVENT_STARTED:
2249 ath10k_wmi_event_scan_started(ar);
2250 break;
2251 case WMI_SCAN_EVENT_COMPLETED:
2252 ath10k_wmi_event_scan_completed(ar);
2253 break;
2254 case WMI_SCAN_EVENT_BSS_CHANNEL:
2255 ath10k_wmi_event_scan_bss_chan(ar);
2256 break;
2257 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2258 ath10k_wmi_event_scan_foreign_chan(ar, freq);
2259 break;
2260 case WMI_SCAN_EVENT_START_FAILED:
2261 ath10k_warn(ar, "received scan start failure event\n");
2262 ath10k_wmi_event_scan_start_failed(ar);
2263 break;
2264 case WMI_SCAN_EVENT_DEQUEUED:
2265 case WMI_SCAN_EVENT_PREEMPTED:
2266 case WMI_SCAN_EVENT_RESTARTED:
2267 case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2268 default:
2269 break;
2270 }
2271
2272 spin_unlock_bh(&ar->data_lock);
2273 return 0;
2274 }
2275
2276 /* If keys are configured, HW decrypts all frames
2277 * with protected bit set. Mark such frames as decrypted.
2278 */
2279 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2280 struct sk_buff *skb,
2281 struct ieee80211_rx_status *status)
2282 {
2283 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2284 unsigned int hdrlen;
2285 bool peer_key;
2286 u8 *addr, keyidx;
2287
2288 if (!ieee80211_is_auth(hdr->frame_control) ||
2289 !ieee80211_has_protected(hdr->frame_control))
2290 return;
2291
2292 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2293 if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2294 return;
2295
2296 keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2297 addr = ieee80211_get_SA(hdr);
2298
2299 spin_lock_bh(&ar->data_lock);
2300 peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2301 spin_unlock_bh(&ar->data_lock);
2302
2303 if (peer_key) {
2304 ath10k_dbg(ar, ATH10K_DBG_MAC,
2305 "mac wep key present for peer %pM\n", addr);
2306 status->flag |= RX_FLAG_DECRYPTED;
2307 }
2308 }
2309
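/* Parse a management RX event into wmi_mgmt_rx_ev_arg, handling both the
 * v1 and extended (v2) header layouts and trimming any HTC trailer
 * padding from the frame.
 */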
2310 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2311 struct wmi_mgmt_rx_ev_arg *arg)
2312 {
2313 struct wmi_mgmt_rx_event_v1 *ev_v1;
2314 struct wmi_mgmt_rx_event_v2 *ev_v2;
2315 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2316 struct wmi_mgmt_rx_ext_info *ext_info;
2317 size_t pull_len;
2318 u32 msdu_len;
2319 u32 len;
2320
2321 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2322 ar->running_fw->fw_file.fw_features)) {
2323 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2324 ev_hdr = &ev_v2->hdr.v1;
2325 pull_len = sizeof(*ev_v2);
2326 } else {
2327 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2328 ev_hdr = &ev_v1->hdr;
2329 pull_len = sizeof(*ev_v1);
2330 }
2331
2332 if (skb->len < pull_len)
2333 return -EPROTO;
2334
2335 skb_pull(skb, pull_len);
2336 arg->channel = ev_hdr->channel;
2337 arg->buf_len = ev_hdr->buf_len;
2338 arg->status = ev_hdr->status;
2339 arg->snr = ev_hdr->snr;
2340 arg->phy_mode = ev_hdr->phy_mode;
2341 arg->rate = ev_hdr->rate;
2342
2343 msdu_len = __le32_to_cpu(arg->buf_len);
2344 if (skb->len < msdu_len)
2345 return -EPROTO;
2346
2347 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2348 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2349 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2350 memcpy(&arg->ext_info, ext_info,
2351 sizeof(struct wmi_mgmt_rx_ext_info));
2352 }
2353 /* The WMI buffer might have been padded to a 4-byte boundary due to the
2354 * HTC trailer with credit update. Trim the excess garbage.
2355 */
2356 skb_trim(skb, msdu_len);
2357
2358 return 0;
2359 }
2360
2361 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2362 struct sk_buff *skb,
2363 struct wmi_mgmt_rx_ev_arg *arg)
2364 {
2365 struct wmi_10_4_mgmt_rx_event *ev;
2366 struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2367 size_t pull_len;
2368 u32 msdu_len;
2369 struct wmi_mgmt_rx_ext_info *ext_info;
2370 u32 len;
2371
2372 ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2373 ev_hdr = &ev->hdr;
2374 pull_len = sizeof(*ev);
2375
2376 if (skb->len < pull_len)
2377 return -EPROTO;
2378
2379 skb_pull(skb, pull_len);
2380 arg->channel = ev_hdr->channel;
2381 arg->buf_len = ev_hdr->buf_len;
2382 arg->status = ev_hdr->status;
2383 arg->snr = ev_hdr->snr;
2384 arg->phy_mode = ev_hdr->phy_mode;
2385 arg->rate = ev_hdr->rate;
2386
2387 msdu_len = __le32_to_cpu(arg->buf_len);
2388 if (skb->len < msdu_len)
2389 return -EPROTO;
2390
2391 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2392 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2393 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2394 memcpy(&arg->ext_info, ext_info,
2395 sizeof(struct wmi_mgmt_rx_ext_info));
2396 }
2397
2398 /* Make sure bytes added for padding are removed. */
2399 skb_trim(skb, msdu_len);
2400
2401 return 0;
2402 }
2403
2404 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2405 struct ieee80211_hdr *hdr)
2406 {
2407 if (!ieee80211_has_protected(hdr->frame_control))
2408 return false;
2409
2410 /* FW delivers WEP Shared Auth frame with Protected Bit set and
2411 * encrypted payload. However, in the case of PMF it delivers decrypted
2412 * frames with Protected Bit set.
2413 */
2414 if (ieee80211_is_auth(hdr->frame_control))
2415 return false;
2416
2417 /* qca99x0 based FW delivers broadcast or multicast management frames
2418 * (ex: group privacy action frames in mesh) as encrypted payload.
2419 */
2420 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2421 ar->hw_params.sw_decrypt_mcast_mgmt)
2422 return false;
2423
2424 return true;
2425 }
2426
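/* Complete a pending management TX: look up the msdu by descriptor id,
 * unmap its DMA buffer and report the TX status (including ACK RSSI when
 * available) back to mac80211.
 */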
2427 static int
2428 wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2429 {
2430 struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2431 struct ath10k_wmi *wmi = &ar->wmi;
2432 struct ieee80211_tx_info *info;
2433 struct sk_buff *msdu;
2434 int ret;
2435
2436 spin_lock_bh(&ar->data_lock);
2437
2438 pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2439 if (!pkt_addr) {
2440 ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2441 param->desc_id);
2442 ret = -ENOENT;
2443 goto out;
2444 }
2445
2446 msdu = pkt_addr->vaddr;
2447 dma_unmap_single(ar->dev, pkt_addr->paddr,
2448 msdu->len, DMA_TO_DEVICE);
2449 info = IEEE80211_SKB_CB(msdu);
2450 kfree(pkt_addr);
2451
2452 if (param->status) {
2453 info->flags &= ~IEEE80211_TX_STAT_ACK;
2454 } else {
2455 info->flags |= IEEE80211_TX_STAT_ACK;
2456 info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2457 param->ack_rssi;
2458 info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
2459 }
2460
2461 ieee80211_tx_status_irqsafe(ar->hw, msdu);
2462
2463 ret = 0;
2464
2465 out:
2466 idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2467 spin_unlock_bh(&ar->data_lock);
2468 return ret;
2469 }
2470
2471 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2472 {
2473 struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2474 struct mgmt_tx_compl_params param;
2475 int ret;
2476
2477 ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2478 if (ret) {
2479 ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2480 return ret;
2481 }
2482
2483 memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2484 param.desc_id = __le32_to_cpu(arg.desc_id);
2485 param.status = __le32_to_cpu(arg.status);
2486
2487 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2488 param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2489
2490 wmi_process_mgmt_tx_comp(ar, &param);
2491
2492 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2493
2494 return 0;
2495 }
2496
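/* Bundled completions carry arrays of per-frame desc_id/status (and, when
 * the ack RSSI service is advertised, ack_rssi) values; each entry is
 * processed exactly like a single tx completion above.
 */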
2497 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2498 {
2499 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2500 struct mgmt_tx_compl_params param;
2501 u32 num_reports;
2502 int i, ret;
2503
2504 ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2505 if (ret) {
2506 ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2507 return ret;
2508 }
2509
2510 num_reports = __le32_to_cpu(arg.num_reports);
2511
2512 for (i = 0; i < num_reports; i++) {
2513 memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2514 param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2515 param.status = __le32_to_cpu(arg.status[i]);
2516
2517 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2518 param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2519 wmi_process_mgmt_tx_comp(ar, &param);
2520 }
2521
2522 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2523
2524 return 0;
2525 }
2526
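/* Management rx path: pull the version-specific event, derive band,
 * frequency, signal and rate for mac80211's rx status, decide whether the
 * frame was already decrypted by firmware and finally deliver it to
 * mac80211 via ieee80211_rx_ni().
 */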
2527 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2528 {
2529 struct wmi_mgmt_rx_ev_arg arg = {};
2530 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2531 struct ieee80211_hdr *hdr;
2532 struct ieee80211_supported_band *sband;
2533 u32 rx_status;
2534 u32 channel;
2535 u32 phy_mode;
2536 u32 snr, rssi;
2537 u32 rate;
2538 u16 fc;
2539 int ret, i;
2540
2541 ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2542 if (ret) {
2543 ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2544 dev_kfree_skb(skb);
2545 return ret;
2546 }
2547
2548 channel = __le32_to_cpu(arg.channel);
2549 rx_status = __le32_to_cpu(arg.status);
2550 snr = __le32_to_cpu(arg.snr);
2551 phy_mode = __le32_to_cpu(arg.phy_mode);
2552 rate = __le32_to_cpu(arg.rate);
2553
2554 memset(status, 0, sizeof(*status));
2555
2556 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2557 "event mgmt rx status %08x\n", rx_status);
2558
2559 if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2560 (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2561 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2562 dev_kfree_skb(skb);
2563 return 0;
2564 }
2565
2566 if (rx_status & WMI_RX_STATUS_ERR_MIC)
2567 status->flag |= RX_FLAG_MMIC_ERROR;
2568
2569 if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2570 status->mactime =
2571 __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2572 status->flag |= RX_FLAG_MACTIME_END;
2573 }
2574 /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2575 * MODE_11B. This means phy_mode is not a reliable source for the band
2576 * of mgmt rx.
2577 */
2578 if (channel >= 1 && channel <= 14) {
2579 status->band = NL80211_BAND_2GHZ;
2580 } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2581 status->band = NL80211_BAND_5GHZ;
2582 } else {
2583 /* Shouldn't happen unless list of advertised channels to
2584 * mac80211 has been changed.
2585 */
2586 WARN_ON_ONCE(1);
2587 dev_kfree_skb(skb);
2588 return 0;
2589 }
2590
2591 if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2592 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2593
2594 sband = &ar->mac.sbands[status->band];
2595
2596 status->freq = ieee80211_channel_to_frequency(channel, status->band);
2597 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2598
2599 BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
2600
2601 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
2602 status->chains &= ~BIT(i);
2603 rssi = __le32_to_cpu(arg.rssi[i]);
2604 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);
2605
2606 if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
2607 status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
2608 status->chains |= BIT(i);
2609 }
2610 }
2611
2612 status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2613
2614 hdr = (struct ieee80211_hdr *)skb->data;
2615 fc = le16_to_cpu(hdr->frame_control);
2616
2617 /* Firmware is guaranteed to report all essential management frames via
2618 * WMI while it can deliver some extra via HTT. Since there can be
2619 * duplicates split the reporting wrt monitor/sniffing.
2620 */
2621 status->flag |= RX_FLAG_SKIP_MONITOR;
2622
2623 ath10k_wmi_handle_wep_reauth(ar, skb, status);
2624
2625 if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2626 status->flag |= RX_FLAG_DECRYPTED;
2627
2628 if (!ieee80211_is_action(hdr->frame_control) &&
2629 !ieee80211_is_deauth(hdr->frame_control) &&
2630 !ieee80211_is_disassoc(hdr->frame_control)) {
2631 status->flag |= RX_FLAG_IV_STRIPPED |
2632 RX_FLAG_MMIC_STRIPPED;
2633 hdr->frame_control = __cpu_to_le16(fc &
2634 ~IEEE80211_FCTL_PROTECTED);
2635 }
2636 }
2637
2638 if (ieee80211_is_beacon(hdr->frame_control))
2639 ath10k_mac_handle_beacon(ar, skb);
2640
2641 if (ieee80211_is_beacon(hdr->frame_control) ||
2642 ieee80211_is_probe_resp(hdr->frame_control))
2643 status->boottime_ns = ktime_get_boottime_ns();
2644
2645 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2646 "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
2647 skb, skb->len,
2648 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2649
2650 ath10k_dbg(ar, ATH10K_DBG_MGMT,
2651 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2652 status->freq, status->band, status->signal,
2653 status->rate_idx);
2654
2655 ieee80211_rx_ni(ar->hw, skb);
2656
2657 return 0;
2658 }
2659
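/* Map a channel center frequency to its index in the flattened 2 GHz +
 * 5 GHz channel list. The index is used to address ar->survey[]; if the
 * frequency is unknown the returned index equals the total channel count
 * and callers are expected to bounds-check it.
 */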
2660 static int freq_to_idx(struct ath10k *ar, int freq)
2661 {
2662 struct ieee80211_supported_band *sband;
2663 int band, ch, idx = 0;
2664
2665 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2666 sband = ar->hw->wiphy->bands[band];
2667 if (!sband)
2668 continue;
2669
2670 for (ch = 0; ch < sband->n_channels; ch++, idx++)
2671 if (sband->channels[ch].center_freq == freq)
2672 goto exit;
2673 }
2674
2675 exit:
2676 return idx;
2677 }
2678
2679 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2680 struct wmi_ch_info_ev_arg *arg)
2681 {
2682 struct wmi_chan_info_event *ev = (void *)skb->data;
2683
2684 if (skb->len < sizeof(*ev))
2685 return -EPROTO;
2686
2687 skb_pull(skb, sizeof(*ev));
2688 arg->err_code = ev->err_code;
2689 arg->freq = ev->freq;
2690 arg->cmd_flags = ev->cmd_flags;
2691 arg->noise_floor = ev->noise_floor;
2692 arg->rx_clear_count = ev->rx_clear_count;
2693 arg->cycle_count = ev->cycle_count;
2694
2695 return 0;
2696 }
2697
2698 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2699 struct sk_buff *skb,
2700 struct wmi_ch_info_ev_arg *arg)
2701 {
2702 struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2703
2704 if (skb->len < sizeof(*ev))
2705 return -EPROTO;
2706
2707 skb_pull(skb, sizeof(*ev));
2708 arg->err_code = ev->err_code;
2709 arg->freq = ev->freq;
2710 arg->cmd_flags = ev->cmd_flags;
2711 arg->noise_floor = ev->noise_floor;
2712 arg->rx_clear_count = ev->rx_clear_count;
2713 arg->cycle_count = ev->cycle_count;
2714 arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2715 arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2716 arg->rx_frame_count = ev->rx_frame_count;
2717
2718 return 0;
2719 }
2720
2721 /*
2722 * Handle the channel info event for firmware which only sends one
2723 * chan_info event per scanned channel.
2724 */
2725 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2726 struct chan_info_params *params)
2727 {
2728 struct survey_info *survey;
2729 int idx;
2730
2731 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2732 ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2733 return;
2734 }
2735
2736 idx = freq_to_idx(ar, params->freq);
2737 if (idx >= ARRAY_SIZE(ar->survey)) {
2738 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2739 params->freq, idx);
2740 return;
2741 }
2742
2743 survey = &ar->survey[idx];
2744
2745 if (!params->mac_clk_mhz)
2746 return;
2747
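	/* Cycle counters tick at the MAC clock rate, so dividing by
	 * mac_clk_mhz yields microseconds and the additional /1000 converts
	 * to the millisecond units expected in struct survey_info.
	 */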
2748 memset(survey, 0, sizeof(*survey));
2749
2750 survey->noise = params->noise_floor;
2751 survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2752 survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2753 survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2754 SURVEY_INFO_TIME_BUSY;
2755 }
2756
2757 /*
2758 * Handle the channel info event for firmware which sends chan_info
2759 * event in pairs (start and stop events) for every scanned channel.
2760 */
2761 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2762 struct chan_info_params *params)
2763 {
2764 struct survey_info *survey;
2765 int idx;
2766
2767 idx = freq_to_idx(ar, params->freq);
2768 if (idx >= ARRAY_SIZE(ar->survey)) {
2769 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2770 params->freq, idx);
2771 return;
2772 }
2773
2774 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2775 if (ar->ch_info_can_report_survey) {
2776 survey = &ar->survey[idx];
2777 survey->noise = params->noise_floor;
2778 survey->filled = SURVEY_INFO_NOISE_DBM;
2779
2780 ath10k_hw_fill_survey_time(ar,
2781 survey,
2782 params->cycle_count,
2783 params->rx_clear_count,
2784 ar->survey_last_cycle_count,
2785 ar->survey_last_rx_clear_count);
2786 }
2787
2788 ar->ch_info_can_report_survey = false;
2789 } else {
2790 ar->ch_info_can_report_survey = true;
2791 }
2792
2793 if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2794 ar->survey_last_rx_clear_count = params->rx_clear_count;
2795 ar->survey_last_cycle_count = params->cycle_count;
2796 }
2797 }
2798
2799 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2800 {
2801 struct chan_info_params ch_info_param;
2802 struct wmi_ch_info_ev_arg arg = {};
2803 int ret;
2804
2805 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2806 if (ret) {
2807 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2808 return;
2809 }
2810
2811 ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2812 ch_info_param.freq = __le32_to_cpu(arg.freq);
2813 ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2814 ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2815 ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2816 ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2817 ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2818
2819 ath10k_dbg(ar, ATH10K_DBG_WMI,
2820 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2821 ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2822 ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2823 ch_info_param.cycle_count);
2824
2825 spin_lock_bh(&ar->data_lock);
2826
2827 switch (ar->scan.state) {
2828 case ATH10K_SCAN_IDLE:
2829 case ATH10K_SCAN_STARTING:
2830 ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
2831 goto exit;
2832 case ATH10K_SCAN_RUNNING:
2833 case ATH10K_SCAN_ABORTING:
2834 break;
2835 }
2836
2837 if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2838 ar->running_fw->fw_file.fw_features))
2839 ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2840 else
2841 ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2842
2843 exit:
2844 spin_unlock_bh(&ar->data_lock);
2845 }
2846
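/* Echo events double as a WMI command barrier: when the echoed value
 * matches ATH10K_WMI_BARRIER_ECHO_ID the waiter blocked on
 * ar->wmi.barrier is woken.
 */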
2847 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2848 {
2849 struct wmi_echo_ev_arg arg = {};
2850 int ret;
2851
2852 ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2853 if (ret) {
2854 ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2855 return;
2856 }
2857
2858 ath10k_dbg(ar, ATH10K_DBG_WMI,
2859 "wmi event echo value 0x%08x\n",
2860 le32_to_cpu(arg.value));
2861
2862 if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2863 complete(&ar->wmi.barrier);
2864 }
2865
2866 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2867 {
2868 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2869 skb->len);
2870
2871 trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2872
2873 return 0;
2874 }
2875
2876 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2877 struct ath10k_fw_stats_pdev *dst)
2878 {
2879 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2880 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2881 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2882 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2883 dst->cycle_count = __le32_to_cpu(src->cycle_count);
2884 dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2885 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2886 }
2887
2888 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2889 struct ath10k_fw_stats_pdev *dst)
2890 {
2891 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2892 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2893 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2894 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2895 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2896 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2897 dst->local_freed = __le32_to_cpu(src->local_freed);
2898 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2899 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2900 dst->underrun = __le32_to_cpu(src->underrun);
2901 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2902 dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2903 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2904 dst->data_rc = __le32_to_cpu(src->data_rc);
2905 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2906 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2907 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2908 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2909 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2910 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2911 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2912 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2913 }
2914
2915 static void
2916 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2917 struct ath10k_fw_stats_pdev *dst)
2918 {
2919 dst->comp_queued = __le32_to_cpu(src->comp_queued);
2920 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2921 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2922 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2923 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2924 dst->local_enqued = __le32_to_cpu(src->local_enqued);
2925 dst->local_freed = __le32_to_cpu(src->local_freed);
2926 dst->hw_queued = __le32_to_cpu(src->hw_queued);
2927 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2928 dst->underrun = __le32_to_cpu(src->underrun);
2929 dst->tx_abort = __le32_to_cpu(src->tx_abort);
2930 dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2931 dst->tx_ko = __le32_to_cpu(src->tx_ko);
2932 dst->data_rc = __le32_to_cpu(src->data_rc);
2933 dst->self_triggers = __le32_to_cpu(src->self_triggers);
2934 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2935 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2936 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2937 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2938 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2939 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2940 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2941 dst->hw_paused = __le32_to_cpu(src->hw_paused);
2942 dst->seq_posted = __le32_to_cpu(src->seq_posted);
2943 dst->seq_failed_queueing =
2944 __le32_to_cpu(src->seq_failed_queueing);
2945 dst->seq_completed = __le32_to_cpu(src->seq_completed);
2946 dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2947 dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2948 dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2949 dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2950 dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2951 dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2953 dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2954 }
2955
2956 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2957 struct ath10k_fw_stats_pdev *dst)
2958 {
2959 dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2960 dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2961 dst->r0_frags = __le32_to_cpu(src->r0_frags);
2962 dst->r1_frags = __le32_to_cpu(src->r1_frags);
2963 dst->r2_frags = __le32_to_cpu(src->r2_frags);
2964 dst->r3_frags = __le32_to_cpu(src->r3_frags);
2965 dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2966 dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2967 dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2968 dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2969 dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2970 dst->phy_errs = __le32_to_cpu(src->phy_errs);
2971 dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2972 dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2973 }
2974
2975 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2976 struct ath10k_fw_stats_pdev *dst)
2977 {
2978 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2979 dst->rts_bad = __le32_to_cpu(src->rts_bad);
2980 dst->rts_good = __le32_to_cpu(src->rts_good);
2981 dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2982 dst->no_beacons = __le32_to_cpu(src->no_beacons);
2983 dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2984 }
2985
2986 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2987 struct ath10k_fw_stats_peer *dst)
2988 {
2989 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2990 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2991 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2992 }
2993
2994 static void
2995 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2996 struct ath10k_fw_stats_peer *dst)
2997 {
2998 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2999 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
3000 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
3001 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3002 }
3003
3004 static void
3005 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
3006 struct ath10k_fw_stats_vdev_extd *dst)
3007 {
3008 dst->vdev_id = __le32_to_cpu(src->vdev_id);
3009 dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
3010 dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
3011 dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
3012 dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
3013 dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
3014 dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
3015 dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
3016 dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
3017 dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
3018 dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
3019 dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
3020 dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
3021 dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
3022 dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
3023 dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
3024 }
3025
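/* The fw_stats pull helpers below walk a variable-length stats event: a
 * fixed header announces how many pdev/peer (and, for newer firmware,
 * vdev and extended) records follow, and each record is skb_pull()'d in
 * turn so that subsequent records are parsed at the correct offset.
 */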
3026 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
3027 struct sk_buff *skb,
3028 struct ath10k_fw_stats *stats)
3029 {
3030 const struct wmi_stats_event *ev = (void *)skb->data;
3031 u32 num_pdev_stats, num_peer_stats;
3032 int i;
3033
3034 if (!skb_pull(skb, sizeof(*ev)))
3035 return -EPROTO;
3036
3037 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3038 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3039
3040 for (i = 0; i < num_pdev_stats; i++) {
3041 const struct wmi_pdev_stats *src;
3042 struct ath10k_fw_stats_pdev *dst;
3043
3044 src = (void *)skb->data;
3045 if (!skb_pull(skb, sizeof(*src)))
3046 return -EPROTO;
3047
3048 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3049 if (!dst)
3050 continue;
3051
3052 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3053 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3054 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3055
3056 list_add_tail(&dst->list, &stats->pdevs);
3057 }
3058
3059 /* fw doesn't implement vdev stats */
3060
3061 for (i = 0; i < num_peer_stats; i++) {
3062 const struct wmi_peer_stats *src;
3063 struct ath10k_fw_stats_peer *dst;
3064
3065 src = (void *)skb->data;
3066 if (!skb_pull(skb, sizeof(*src)))
3067 return -EPROTO;
3068
3069 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3070 if (!dst)
3071 continue;
3072
3073 ath10k_wmi_pull_peer_stats(src, dst);
3074 list_add_tail(&dst->list, &stats->peers);
3075 }
3076
3077 return 0;
3078 }
3079
3080 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3081 struct sk_buff *skb,
3082 struct ath10k_fw_stats *stats)
3083 {
3084 const struct wmi_stats_event *ev = (void *)skb->data;
3085 u32 num_pdev_stats, num_peer_stats;
3086 int i;
3087
3088 if (!skb_pull(skb, sizeof(*ev)))
3089 return -EPROTO;
3090
3091 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3092 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3093
3094 for (i = 0; i < num_pdev_stats; i++) {
3095 const struct wmi_10x_pdev_stats *src;
3096 struct ath10k_fw_stats_pdev *dst;
3097
3098 src = (void *)skb->data;
3099 if (!skb_pull(skb, sizeof(*src)))
3100 return -EPROTO;
3101
3102 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3103 if (!dst)
3104 continue;
3105
3106 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3107 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3108 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3109 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3110
3111 list_add_tail(&dst->list, &stats->pdevs);
3112 }
3113
3114 /* fw doesn't implement vdev stats */
3115
3116 for (i = 0; i < num_peer_stats; i++) {
3117 const struct wmi_10x_peer_stats *src;
3118 struct ath10k_fw_stats_peer *dst;
3119
3120 src = (void *)skb->data;
3121 if (!skb_pull(skb, sizeof(*src)))
3122 return -EPROTO;
3123
3124 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3125 if (!dst)
3126 continue;
3127
3128 ath10k_wmi_pull_peer_stats(&src->old, dst);
3129
3130 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3131
3132 list_add_tail(&dst->list, &stats->peers);
3133 }
3134
3135 return 0;
3136 }
3137
3138 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3139 struct sk_buff *skb,
3140 struct ath10k_fw_stats *stats)
3141 {
3142 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3143 u32 num_pdev_stats;
3144 u32 num_pdev_ext_stats;
3145 u32 num_peer_stats;
3146 int i;
3147
3148 if (!skb_pull(skb, sizeof(*ev)))
3149 return -EPROTO;
3150
3151 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3152 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3153 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3154
3155 for (i = 0; i < num_pdev_stats; i++) {
3156 const struct wmi_10_2_pdev_stats *src;
3157 struct ath10k_fw_stats_pdev *dst;
3158
3159 src = (void *)skb->data;
3160 if (!skb_pull(skb, sizeof(*src)))
3161 return -EPROTO;
3162
3163 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3164 if (!dst)
3165 continue;
3166
3167 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3168 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3169 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3170 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3171 /* FIXME: expose 10.2 specific values */
3172
3173 list_add_tail(&dst->list, &stats->pdevs);
3174 }
3175
3176 for (i = 0; i < num_pdev_ext_stats; i++) {
3177 const struct wmi_10_2_pdev_ext_stats *src;
3178
3179 src = (void *)skb->data;
3180 if (!skb_pull(skb, sizeof(*src)))
3181 return -EPROTO;
3182
3183 /* FIXME: expose values to userspace
3184 *
3185 * Note: Even though this loop seems to do nothing it is
3186 * required to parse following sub-structures properly.
3187 */
3188 }
3189
3190 /* fw doesn't implement vdev stats */
3191
3192 for (i = 0; i < num_peer_stats; i++) {
3193 const struct wmi_10_2_peer_stats *src;
3194 struct ath10k_fw_stats_peer *dst;
3195
3196 src = (void *)skb->data;
3197 if (!skb_pull(skb, sizeof(*src)))
3198 return -EPROTO;
3199
3200 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3201 if (!dst)
3202 continue;
3203
3204 ath10k_wmi_pull_peer_stats(&src->old, dst);
3205
3206 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3207 /* FIXME: expose 10.2 specific values */
3208
3209 list_add_tail(&dst->list, &stats->peers);
3210 }
3211
3212 return 0;
3213 }
3214
3215 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3216 struct sk_buff *skb,
3217 struct ath10k_fw_stats *stats)
3218 {
3219 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3220 u32 num_pdev_stats;
3221 u32 num_pdev_ext_stats;
3222 u32 num_peer_stats;
3223 int i;
3224
3225 if (!skb_pull(skb, sizeof(*ev)))
3226 return -EPROTO;
3227
3228 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3229 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3230 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3231
3232 for (i = 0; i < num_pdev_stats; i++) {
3233 const struct wmi_10_2_pdev_stats *src;
3234 struct ath10k_fw_stats_pdev *dst;
3235
3236 src = (void *)skb->data;
3237 if (!skb_pull(skb, sizeof(*src)))
3238 return -EPROTO;
3239
3240 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3241 if (!dst)
3242 continue;
3243
3244 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3245 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3246 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3247 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3248 /* FIXME: expose 10.2 specific values */
3249
3250 list_add_tail(&dst->list, &stats->pdevs);
3251 }
3252
3253 for (i = 0; i < num_pdev_ext_stats; i++) {
3254 const struct wmi_10_2_pdev_ext_stats *src;
3255
3256 src = (void *)skb->data;
3257 if (!skb_pull(skb, sizeof(*src)))
3258 return -EPROTO;
3259
3260 /* FIXME: expose values to userspace
3261 *
3262 * Note: Even though this loop seems to do nothing it is
3263 * required to parse following sub-structures properly.
3264 */
3265 }
3266
3267 /* fw doesn't implement vdev stats */
3268
3269 for (i = 0; i < num_peer_stats; i++) {
3270 const struct wmi_10_2_4_ext_peer_stats *src;
3271 struct ath10k_fw_stats_peer *dst;
3272 int stats_len;
3273
3274 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3275 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3276 else
3277 stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3278
3279 src = (void *)skb->data;
3280 if (!skb_pull(skb, stats_len))
3281 return -EPROTO;
3282
3283 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3284 if (!dst)
3285 continue;
3286
3287 ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3288
3289 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3290
3291 if (ath10k_peer_stats_enabled(ar))
3292 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3293 /* FIXME: expose 10.2 specific values */
3294
3295 list_add_tail(&dst->list, &stats->peers);
3296 }
3297
3298 return 0;
3299 }
3300
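/* 10.4 firmware appends extended peer and vdev stats after the regular
 * records; their presence is signalled by the WMI_10_4_STAT_PEER_EXTD and
 * WMI_10_4_STAT_VDEV_EXTD bits in stats_id.
 */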
3301 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3302 struct sk_buff *skb,
3303 struct ath10k_fw_stats *stats)
3304 {
3305 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3306 u32 num_pdev_stats;
3307 u32 num_pdev_ext_stats;
3308 u32 num_vdev_stats;
3309 u32 num_peer_stats;
3310 u32 num_bcnflt_stats;
3311 u32 stats_id;
3312 int i;
3313
3314 if (!skb_pull(skb, sizeof(*ev)))
3315 return -EPROTO;
3316
3317 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3318 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3319 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3320 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3321 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3322 stats_id = __le32_to_cpu(ev->stats_id);
3323
3324 for (i = 0; i < num_pdev_stats; i++) {
3325 const struct wmi_10_4_pdev_stats *src;
3326 struct ath10k_fw_stats_pdev *dst;
3327
3328 src = (void *)skb->data;
3329 if (!skb_pull(skb, sizeof(*src)))
3330 return -EPROTO;
3331
3332 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3333 if (!dst)
3334 continue;
3335
3336 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3337 ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3338 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3339 dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3340 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3341
3342 list_add_tail(&dst->list, &stats->pdevs);
3343 }
3344
3345 for (i = 0; i < num_pdev_ext_stats; i++) {
3346 const struct wmi_10_2_pdev_ext_stats *src;
3347
3348 src = (void *)skb->data;
3349 if (!skb_pull(skb, sizeof(*src)))
3350 return -EPROTO;
3351
3352 /* FIXME: expose values to userspace
3353 *
3354 * Note: Even though this loop seems to do nothing it is
3355 * required to parse following sub-structures properly.
3356 */
3357 }
3358
3359 for (i = 0; i < num_vdev_stats; i++) {
3360 const struct wmi_vdev_stats *src;
3361
3362 /* Ignore vdev stats here as it has only vdev id. Actual vdev
3363 * stats will be retrieved from vdev extended stats.
3364 */
3365 src = (void *)skb->data;
3366 if (!skb_pull(skb, sizeof(*src)))
3367 return -EPROTO;
3368 }
3369
3370 for (i = 0; i < num_peer_stats; i++) {
3371 const struct wmi_10_4_peer_stats *src;
3372 struct ath10k_fw_stats_peer *dst;
3373
3374 src = (void *)skb->data;
3375 if (!skb_pull(skb, sizeof(*src)))
3376 return -EPROTO;
3377
3378 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3379 if (!dst)
3380 continue;
3381
3382 ath10k_wmi_10_4_pull_peer_stats(src, dst);
3383 list_add_tail(&dst->list, &stats->peers);
3384 }
3385
3386 for (i = 0; i < num_bcnflt_stats; i++) {
3387 const struct wmi_10_4_bss_bcn_filter_stats *src;
3388
3389 src = (void *)skb->data;
3390 if (!skb_pull(skb, sizeof(*src)))
3391 return -EPROTO;
3392
3393 /* FIXME: expose values to userspace
3394 *
3395 * Note: Even though this loop seems to do nothing it is
3396 * required to parse following sub-structures properly.
3397 */
3398 }
3399
3400 if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3401 stats->extended = true;
3402
3403 for (i = 0; i < num_peer_stats; i++) {
3404 const struct wmi_10_4_peer_extd_stats *src;
3405 struct ath10k_fw_extd_stats_peer *dst;
3406
3407 src = (void *)skb->data;
3408 if (!skb_pull(skb, sizeof(*src)))
3409 return -EPROTO;
3410
3411 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3412 if (!dst)
3413 continue;
3414
3415 ether_addr_copy(dst->peer_macaddr,
3416 src->peer_macaddr.addr);
3417 dst->rx_duration = __le32_to_cpu(src->rx_duration);
3418 list_add_tail(&dst->list, &stats->peers_extd);
3419 }
3420 }
3421
3422 if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3423 for (i = 0; i < num_vdev_stats; i++) {
3424 const struct wmi_vdev_stats_extd *src;
3425 struct ath10k_fw_stats_vdev_extd *dst;
3426
3427 src = (void *)skb->data;
3428 if (!skb_pull(skb, sizeof(*src)))
3429 return -EPROTO;
3430
3431 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3432 if (!dst)
3433 continue;
3434 ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3435 list_add_tail(&dst->list, &stats->vdevs);
3436 }
3437 }
3438
3439 return 0;
3440 }
3441
3442 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3443 {
3444 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3445 ath10k_debug_fw_stats_process(ar, skb);
3446 }
3447
3448 static int
3449 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3450 struct wmi_vdev_start_ev_arg *arg)
3451 {
3452 struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3453
3454 if (skb->len < sizeof(*ev))
3455 return -EPROTO;
3456
3457 skb_pull(skb, sizeof(*ev));
3458 arg->vdev_id = ev->vdev_id;
3459 arg->req_id = ev->req_id;
3460 arg->resp_type = ev->resp_type;
3461 arg->status = ev->status;
3462
3463 return 0;
3464 }
3465
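/* A vdev start/restart response always completes ar->vdev_setup_done; a
 * non-zero firmware status is recorded in last_wmi_vdev_start_status so
 * the caller waiting on the completion can propagate the error.
 */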
3466 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3467 {
3468 struct wmi_vdev_start_ev_arg arg = {};
3469 int ret;
3470 u32 status;
3471
3472 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3473
3474 ar->last_wmi_vdev_start_status = 0;
3475
3476 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3477 if (ret) {
3478 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3479 ar->last_wmi_vdev_start_status = ret;
3480 goto out;
3481 }
3482
3483 status = __le32_to_cpu(arg.status);
3484 if (WARN_ON_ONCE(status)) {
3485 ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3486 status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3487 "chan-invalid" : "unknown");
3488 /* Setup is done one way or another though, so we should still
3489 * do the completion, so don't return here.
3490 */
3491 ar->last_wmi_vdev_start_status = -EINVAL;
3492 }
3493
3494 out:
3495 complete(&ar->vdev_setup_done);
3496 }
3497
3498 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3499 {
3500 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3501 complete(&ar->vdev_setup_done);
3502 }
3503
3504 static int
3505 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3506 struct wmi_peer_kick_ev_arg *arg)
3507 {
3508 struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3509
3510 if (skb->len < sizeof(*ev))
3511 return -EPROTO;
3512
3513 skb_pull(skb, sizeof(*ev));
3514 arg->mac_addr = ev->peer_macaddr.addr;
3515
3516 return 0;
3517 }
3518
3519 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3520 {
3521 struct wmi_peer_kick_ev_arg arg = {};
3522 struct ieee80211_sta *sta;
3523 int ret;
3524
3525 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3526 if (ret) {
3527 ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3528 ret);
3529 return;
3530 }
3531
3532 ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
3533 arg.mac_addr);
3534
3535 rcu_read_lock();
3536
3537 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3538 if (!sta) {
3539 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3540 arg.mac_addr);
3541 goto exit;
3542 }
3543
3544 ieee80211_report_low_ack(sta, 10);
3545
3546 exit:
3547 rcu_read_unlock();
3548 }
3549
3550 /*
3551 * FIXME
3552 *
3553 * We don't report to mac80211 sleep state of connected
3554 * stations. Due to this mac80211 can't fill in TIM IE
3555 * correctly.
3556 *
3557 * I know of no way of getting nullfunc frames that contain
3558 * sleep transition from connected stations - these do not
3559 * seem to be sent from the target to the host. There also
3560 * doesn't seem to be a dedicated event for that. So the
3561 * only way left to do this would be to read tim_bitmap
3562 * during SWBA.
3563 *
3564 * We could probably try using tim_bitmap from SWBA to tell
3565 * mac80211 which stations are asleep and which are not. The
3566 * problem here is calling mac80211 functions so many times
3567 * could take too long and make us miss the time to submit
3568 * the beacon to the target.
3569 *
3570 * So as a workaround we try to extend the TIM IE if there
3571 * is unicast buffered for stations with aid > 7 and fill it
3572 * in ourselves.
3573 */
3574 static void ath10k_wmi_update_tim(struct ath10k *ar,
3575 struct ath10k_vif *arvif,
3576 struct sk_buff *bcn,
3577 const struct wmi_tim_info_arg *tim_info)
3578 {
3579 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3580 struct ieee80211_tim_ie *tim;
3581 u8 *ies, *ie;
3582 u8 ie_len, pvm_len;
3583 __le32 t;
3584 u32 v, tim_len;
3585
3586 /* When FW reports 0 in tim_len, ensure at least first byte
3587 * in tim_bitmap is considered for pvm calculation.
3588 */
3589 tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3590
3591 /* if next SWBA has no tim_changed the tim_bitmap is garbage.
3592 * we must copy the bitmap upon change and reuse it later
3593 */
3594 if (__le32_to_cpu(tim_info->tim_changed)) {
3595 int i;
3596
3597 if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3598 ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3599 tim_len, sizeof(arvif->u.ap.tim_bitmap));
3600 tim_len = sizeof(arvif->u.ap.tim_bitmap);
3601 }
3602
3603 for (i = 0; i < tim_len; i++) {
3604 t = tim_info->tim_bitmap[i / 4];
3605 v = __le32_to_cpu(t);
3606 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3607 }
3608
3609 /* FW reports either length 0 or length based on max supported
3610 * station. so we calculate this on our own
3611 */
3612 arvif->u.ap.tim_len = 0;
3613 for (i = 0; i < tim_len; i++)
3614 if (arvif->u.ap.tim_bitmap[i])
3615 arvif->u.ap.tim_len = i;
3616
3617 arvif->u.ap.tim_len++;
3618 }
3619
3620 ies = bcn->data;
3621 ies += ieee80211_hdrlen(hdr->frame_control);
3622 ies += 12; /* fixed parameters */
3623
3624 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3625 (u8 *)skb_tail_pointer(bcn) - ies);
3626 if (!ie) {
3627 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3628 ath10k_warn(ar, "no tim ie found;\n");
3629 return;
3630 }
3631
3632 tim = (void *)ie + 2;
3633 ie_len = ie[1];
3634 pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3635
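	/* If the firmware bitmap is longer than the beacon's partial virtual
	 * bitmap, grow the skb and shift the trailing IEs so the TIM IE can
	 * be expanded in place.
	 */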
3636 if (pvm_len < arvif->u.ap.tim_len) {
3637 int expand_size = tim_len - pvm_len;
3638 int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3639 void *next_ie = ie + 2 + ie_len;
3640
3641 if (skb_put(bcn, expand_size)) {
3642 memmove(next_ie + expand_size, next_ie, move_size);
3643
3644 ie[1] += expand_size;
3645 ie_len += expand_size;
3646 pvm_len += expand_size;
3647 } else {
3648 ath10k_warn(ar, "tim expansion failed\n");
3649 }
3650 }
3651
3652 if (pvm_len > tim_len) {
3653 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3654 return;
3655 }
3656
3657 tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3658 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3659
3660 if (tim->dtim_count == 0) {
3661 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3662
3663 if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3664 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3665 }
3666
3667 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3668 tim->dtim_count, tim->dtim_period,
3669 tim->bitmap_ctrl, pvm_len);
3670 }
3671
3672 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3673 struct sk_buff *bcn,
3674 const struct wmi_p2p_noa_info *noa)
3675 {
3676 if (!arvif->vif->p2p)
3677 return;
3678
3679 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3680
3681 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3682 ath10k_p2p_noa_update(arvif, noa);
3683
3684 if (arvif->u.ap.noa_data)
3685 if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3686 skb_put_data(bcn, arvif->u.ap.noa_data,
3687 arvif->u.ap.noa_len);
3688 }
3689
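/* SWBA pull helpers: walk the vdev_map bitmask bit by bit and record the
 * per-vdev TIM info (and, where supported, P2P notice-of-absence info) so
 * the beacon can be patched before transmission.
 */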
3690 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3691 struct wmi_swba_ev_arg *arg)
3692 {
3693 struct wmi_host_swba_event *ev = (void *)skb->data;
3694 u32 map;
3695 size_t i;
3696
3697 if (skb->len < sizeof(*ev))
3698 return -EPROTO;
3699
3700 skb_pull(skb, sizeof(*ev));
3701 arg->vdev_map = ev->vdev_map;
3702
3703 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3704 if (!(map & BIT(0)))
3705 continue;
3706
3707 /* If this happens there were some changes in firmware and
3708 * ath10k should update the max size of tim_info array.
3709 */
3710 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3711 break;
3712
3713 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3714 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3715 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3716 return -EPROTO;
3717 }
3718
3719 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3720 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3721 arg->tim_info[i].tim_bitmap =
3722 ev->bcn_info[i].tim_info.tim_bitmap;
3723 arg->tim_info[i].tim_changed =
3724 ev->bcn_info[i].tim_info.tim_changed;
3725 arg->tim_info[i].tim_num_ps_pending =
3726 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3727
3728 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3729 i++;
3730 }
3731
3732 return 0;
3733 }
3734
3735 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3736 struct sk_buff *skb,
3737 struct wmi_swba_ev_arg *arg)
3738 {
3739 struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3740 u32 map;
3741 size_t i;
3742
3743 if (skb->len < sizeof(*ev))
3744 return -EPROTO;
3745
3746 skb_pull(skb, sizeof(*ev));
3747 arg->vdev_map = ev->vdev_map;
3748
3749 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3750 if (!(map & BIT(0)))
3751 continue;
3752
3753 /* If this happens there were some changes in firmware and
3754 * ath10k should update the max size of tim_info array.
3755 */
3756 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3757 break;
3758
3759 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3760 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3761 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3762 return -EPROTO;
3763 }
3764
3765 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3766 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3767 arg->tim_info[i].tim_bitmap =
3768 ev->bcn_info[i].tim_info.tim_bitmap;
3769 arg->tim_info[i].tim_changed =
3770 ev->bcn_info[i].tim_info.tim_changed;
3771 arg->tim_info[i].tim_num_ps_pending =
3772 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3773 i++;
3774 }
3775
3776 return 0;
3777 }
3778
3779 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3780 struct sk_buff *skb,
3781 struct wmi_swba_ev_arg *arg)
3782 {
3783 struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3784 u32 map, tim_len;
3785 size_t i;
3786
3787 if (skb->len < sizeof(*ev))
3788 return -EPROTO;
3789
3790 skb_pull(skb, sizeof(*ev));
3791 arg->vdev_map = ev->vdev_map;
3792
3793 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3794 if (!(map & BIT(0)))
3795 continue;
3796
3797 /* If this happens there were some changes in firmware and
3798 * ath10k should update the max size of tim_info array.
3799 */
3800 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3801 break;
3802
3803 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3804 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3805 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3806 return -EPROTO;
3807 }
3808
3809 tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3810 if (tim_len) {
3811 /* Exclude 4 byte guard length */
3812 tim_len -= 4;
3813 arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3814 } else {
3815 arg->tim_info[i].tim_len = 0;
3816 }
3817
3818 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3819 arg->tim_info[i].tim_bitmap =
3820 ev->bcn_info[i].tim_info.tim_bitmap;
3821 arg->tim_info[i].tim_changed =
3822 ev->bcn_info[i].tim_info.tim_changed;
3823 arg->tim_info[i].tim_num_ps_pending =
3824 ev->bcn_info[i].tim_info.tim_num_ps_pending;
3825
3826 /* 10.4 firmware doesn't have p2p support. notice of absence
3827 * info can be ignored for now.
3828 */
3829
3830 i++;
3831 }
3832
3833 return 0;
3834 }
3835
3836 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3837 {
3838 return WMI_TXBF_CONF_BEFORE_ASSOC;
3839 }
3840
3841 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3842 {
3843 struct wmi_swba_ev_arg arg = {};
3844 u32 map;
3845 int i = -1;
3846 const struct wmi_tim_info_arg *tim_info;
3847 const struct wmi_p2p_noa_info *noa_info;
3848 struct ath10k_vif *arvif;
3849 struct sk_buff *bcn;
3850 dma_addr_t paddr;
3851 int ret, vdev_id = 0;
3852
3853 ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3854 if (ret) {
3855 ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3856 return;
3857 }
3858
3859 map = __le32_to_cpu(arg.vdev_map);
3860
3861 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3862 map);
3863
3864 for (; map; map >>= 1, vdev_id++) {
3865 if (!(map & 0x1))
3866 continue;
3867
3868 i++;
3869
3870 if (i >= WMI_MAX_AP_VDEV) {
3871 ath10k_warn(ar, "swba has corrupted vdev map\n");
3872 break;
3873 }
3874
3875 tim_info = &arg.tim_info[i];
3876 noa_info = arg.noa_info[i];
3877
3878 ath10k_dbg(ar, ATH10K_DBG_MGMT,
3879 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3880 i,
3881 __le32_to_cpu(tim_info->tim_len),
3882 __le32_to_cpu(tim_info->tim_mcast),
3883 __le32_to_cpu(tim_info->tim_changed),
3884 __le32_to_cpu(tim_info->tim_num_ps_pending),
3885 __le32_to_cpu(tim_info->tim_bitmap[3]),
3886 __le32_to_cpu(tim_info->tim_bitmap[2]),
3887 __le32_to_cpu(tim_info->tim_bitmap[1]),
3888 __le32_to_cpu(tim_info->tim_bitmap[0]));
3889
3890 /* TODO: Only first 4 word from tim_bitmap is dumped.
3891 * Extend debug code to dump full tim_bitmap.
3892 */
3893
3894 arvif = ath10k_get_arvif(ar, vdev_id);
3895 if (arvif == NULL) {
3896 ath10k_warn(ar, "no vif for vdev_id %d found\n",
3897 vdev_id);
3898 continue;
3899 }
3900
3901 /* mac80211 would have already asked us to stop beaconing and
3902 * bring the vdev down, so continue in that case
3903 */
3904 if (!arvif->is_up)
3905 continue;
3906
3907 /* There are no completions for beacons so wait for next SWBA
3908 * before telling mac80211 to decrement CSA counter
3909 *
3910 * Once CSA counter is completed stop sending beacons until
3911 * actual channel switch is done
3912 */
3913 if (arvif->vif->bss_conf.csa_active &&
3914 ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
3915 ieee80211_csa_finish(arvif->vif, 0);
3916 continue;
3917 }
3918
3919 bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
3920 if (!bcn) {
3921 ath10k_warn(ar, "could not get mac80211 beacon\n");
3922 continue;
3923 }
3924
3925 ath10k_tx_h_seq_no(arvif->vif, bcn);
3926 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3927 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3928
3929 spin_lock_bh(&ar->data_lock);
3930
3931 if (arvif->beacon) {
3932 switch (arvif->beacon_state) {
3933 case ATH10K_BEACON_SENT:
3934 break;
3935 case ATH10K_BEACON_SCHEDULED:
3936 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3937 arvif->vdev_id);
3938 break;
3939 case ATH10K_BEACON_SENDING:
3940 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3941 arvif->vdev_id);
3942 dev_kfree_skb(bcn);
3943 goto skip;
3944 }
3945
3946 ath10k_mac_vif_beacon_free(arvif);
3947 }
3948
3949 if (!arvif->beacon_buf) {
3950 paddr = dma_map_single(arvif->ar->dev, bcn->data,
3951 bcn->len, DMA_TO_DEVICE);
3952 ret = dma_mapping_error(arvif->ar->dev, paddr);
3953 if (ret) {
3954 ath10k_warn(ar, "failed to map beacon: %d\n",
3955 ret);
3956 dev_kfree_skb_any(bcn);
3957 goto skip;
3958 }
3959
3960 ATH10K_SKB_CB(bcn)->paddr = paddr;
3961 } else {
3962 if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3963 ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3964 bcn->len, IEEE80211_MAX_FRAME_LEN);
3965 skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3966 }
3967 memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3968 ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3969 }
3970
3971 arvif->beacon = bcn;
3972 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3973
3974 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3975 trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3976
3977 skip:
3978 spin_unlock_bh(&ar->data_lock);
3979 }
3980
3981 ath10k_wmi_tx_beacons_nowait(ar);
3982 }
3983
3984 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3985 {
3986 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3987 }
3988
3989 static void ath10k_radar_detected(struct ath10k *ar)
3990 {
3991 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3992 ATH10K_DFS_STAT_INC(ar, radar_detected);
3993
3994 /* Control radar events reporting in debugfs file
3995 * dfs_block_radar_events
3996 */
3997 if (ar->dfs_block_radar_events)
3998 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3999 else
4000 ieee80211_radar_detected(ar->hw, NULL);
4001 }
4002
4003 static void ath10k_radar_confirmation_work(struct work_struct *work)
4004 {
4005 struct ath10k *ar = container_of(work, struct ath10k,
4006 radar_confirmation_work);
4007 struct ath10k_radar_found_info radar_info;
4008 int ret, time_left;
4009
4010 reinit_completion(&ar->wmi.radar_confirm);
4011
4012 spin_lock_bh(&ar->data_lock);
4013 memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
4014 spin_unlock_bh(&ar->data_lock);
4015
4016 ret = ath10k_wmi_report_radar_found(ar, &radar_info);
4017 if (ret) {
4018 ath10k_warn(ar, "failed to send radar found %d\n", ret);
4019 goto wait_complete;
4020 }
4021
4022 time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
4023 ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
4024 if (time_left) {
4025 /* DFS Confirmation status event received and
4026 * necessary action completed.
4027 */
4028 goto wait_complete;
4029 } else {
4030 /* DFS confirmation event not received from FW. Considering this
4031 * as a real radar.
4032 */
4033 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4034 "dfs confirmation not received from fw, considering as radar\n");
4035 goto radar_detected;
4036 }
4037
4038 radar_detected:
4039 ath10k_radar_detected(ar);
4040
4041 /* Reset state to allow sending confirmation on consecutive radar
4042 * detections, unless radar confirmation is disabled/stopped.
4043 */
4044 wait_complete:
4045 spin_lock_bh(&ar->data_lock);
4046 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
4047 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4048 spin_unlock_bh(&ar->data_lock);
4049 }
4050
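/* Convert a firmware radar-report PHY error into a pulse_event for the
 * DFS pattern detector. When the host DFS check service is available and
 * the regulatory region is FCC, a detected pattern is first confirmed
 * with firmware via radar_confirmation_work before mac80211 is notified.
 */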
4051 static void ath10k_dfs_radar_report(struct ath10k *ar,
4052 struct wmi_phyerr_ev_arg *phyerr,
4053 const struct phyerr_radar_report *rr,
4054 u64 tsf)
4055 {
4056 u32 reg0, reg1, tsf32l;
4057 struct ieee80211_channel *ch;
4058 struct pulse_event pe;
4059 struct radar_detector_specs rs;
4060 u64 tsf64;
4061 u8 rssi, width;
4062 struct ath10k_radar_found_info *radar_info;
4063
4064 reg0 = __le32_to_cpu(rr->reg0);
4065 reg1 = __le32_to_cpu(rr->reg1);
4066
4067 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4068 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
4069 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
4070 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
4071 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
4072 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
4073 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4074 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
4075 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4076 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4077 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4078 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4079 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4080 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4081 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4082 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4083 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4084
4085 if (!ar->dfs_detector)
4086 return;
4087
4088 spin_lock_bh(&ar->data_lock);
4089 ch = ar->rx_channel;
4090
4091 /* fetch target operating channel during channel change */
4092 if (!ch)
4093 ch = ar->tgt_oper_chan;
4094
4095 spin_unlock_bh(&ar->data_lock);
4096
4097 if (!ch) {
4098 ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4099 goto radar_detected;
4100 }
4101
4102 /* report event to DFS pattern detector */
4103 tsf32l = phyerr->tsf_timestamp;
4104 tsf64 = tsf & (~0xFFFFFFFFULL);
4105 tsf64 |= tsf32l;
4106
4107 width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4108 rssi = phyerr->rssi_combined;
4109
4110 /* hardware stores this as an 8 bit signed value,
4111 * set it to zero if it is a negative number
4112 */
4113 if (rssi & 0x80)
4114 rssi = 0;
4115
4116 pe.ts = tsf64;
4117 pe.freq = ch->center_freq;
4118 pe.width = width;
4119 pe.rssi = rssi;
4120 pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4121 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4122 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4123 pe.freq, pe.width, pe.rssi, pe.ts);
4124
4125 ATH10K_DFS_STAT_INC(ar, pulses_detected);
4126
4127 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4128 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4129 "dfs no pulse pattern detected, yet\n");
4130 return;
4131 }
4132
4133 if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4134 ar->dfs_detector->region == NL80211_DFS_FCC) {
4135 /* Consecutive radar indications need not be
4136 * sent to the firmware until we get confirmation
4137 * for the previously detected radar.
4138 */
4139 spin_lock_bh(&ar->data_lock);
4140 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4141 spin_unlock_bh(&ar->data_lock);
4142 return;
4143 }
4144 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4145 radar_info = &ar->last_radar_info;
4146
4147 radar_info->pri_min = rs.pri_min;
4148 radar_info->pri_max = rs.pri_max;
4149 radar_info->width_min = rs.width_min;
4150 radar_info->width_max = rs.width_max;
4151 /* TODO: Find sidx_min and sidx_max */
4152 radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4153 radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4154
4155 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4156 "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4157 radar_info->pri_min, radar_info->pri_max,
4158 radar_info->width_min, radar_info->width_max,
4159 radar_info->sidx_min, radar_info->sidx_max);
4160 ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4161 spin_unlock_bh(&ar->data_lock);
4162 return;
4163 }
4164
4165 radar_detected:
4166 ath10k_radar_detected(ar);
4167 }
4168
4169 static int ath10k_dfs_fft_report(struct ath10k *ar,
4170 struct wmi_phyerr_ev_arg *phyerr,
4171 const struct phyerr_fft_report *fftr,
4172 u64 tsf)
4173 {
4174 u32 reg0, reg1;
4175 u8 rssi, peak_mag;
4176
4177 reg0 = __le32_to_cpu(fftr->reg0);
4178 reg1 = __le32_to_cpu(fftr->reg1);
4179 rssi = phyerr->rssi_combined;
4180
4181 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4182 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4183 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4184 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4185 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4186 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4187 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4188 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4189 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4190 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4191 MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4192 MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4193
4194 peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4195
4196 /* false event detection */
4197 if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4198 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4199 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4200 ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4201 return -EINVAL;
4202 }
4203
4204 return 0;
4205 }
4206
4207 void ath10k_wmi_event_dfs(struct ath10k *ar,
4208 struct wmi_phyerr_ev_arg *phyerr,
4209 u64 tsf)
4210 {
4211 int buf_len, tlv_len, res, i = 0;
4212 const struct phyerr_tlv *tlv;
4213 const struct phyerr_radar_report *rr;
4214 const struct phyerr_fft_report *fftr;
4215 const u8 *tlv_buf;
4216
4217 buf_len = phyerr->buf_len;
4218 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4219 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4220 phyerr->phy_err_code, phyerr->rssi_combined,
4221 phyerr->tsf_timestamp, tsf, buf_len);
4222
4223 /* Skip event if DFS disabled */
4224 if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4225 return;
4226
4227 ATH10K_DFS_STAT_INC(ar, pulses_total);
4228
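/* Walk the phyerr buffer as a sequence of TLVs: each entry is a
 * phyerr_tlv header followed by tlv->len bytes of payload. The bounds
 * checks below guard both the header and the tag-specific payload
 * before they are dereferenced.
 */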
4229 while (i < buf_len) {
4230 if (i + sizeof(*tlv) > buf_len) {
4231 ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4232 i);
4233 return;
4234 }
4235
4236 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4237 tlv_len = __le16_to_cpu(tlv->len);
4238 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4239 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4240 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4241 tlv_len, tlv->tag, tlv->sig);
4242
4243 switch (tlv->tag) {
4244 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4245 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4246 ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4247 i);
4248 return;
4249 }
4250
4251 rr = (struct phyerr_radar_report *)tlv_buf;
4252 ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4253 break;
4254 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4255 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4256 ath10k_warn(ar, "too short fft report (%d)\n",
4257 i);
4258 return;
4259 }
4260
4261 fftr = (struct phyerr_fft_report *)tlv_buf;
4262 res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4263 if (res)
4264 return;
4265 break;
4266 }
4267
4268 i += sizeof(*tlv) + tlv_len;
4269 }
4270 }
4271
4272 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4273 struct wmi_phyerr_ev_arg *phyerr,
4274 u64 tsf)
4275 {
4276 int buf_len, tlv_len, res, i = 0;
4277 struct phyerr_tlv *tlv;
4278 const void *tlv_buf;
4279 const struct phyerr_fft_report *fftr;
4280 size_t fftr_len;
4281
4282 buf_len = phyerr->buf_len;
4283
4284 while (i < buf_len) {
4285 if (i + sizeof(*tlv) > buf_len) {
4286 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4287 i);
4288 return;
4289 }
4290
4291 tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4292 tlv_len = __le16_to_cpu(tlv->len);
4293 tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4294
4295 if (i + sizeof(*tlv) + tlv_len > buf_len) {
4296 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4297 i);
4298 return;
4299 }
4300
4301 switch (tlv->tag) {
4302 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4303 if (sizeof(*fftr) > tlv_len) {
4304 ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4305 i);
4306 return;
4307 }
4308
4309 fftr_len = tlv_len - sizeof(*fftr);
4310 fftr = tlv_buf;
4311 res = ath10k_spectral_process_fft(ar, phyerr,
4312 fftr, fftr_len,
4313 tsf);
4314 if (res < 0) {
4315 ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4316 res);
4317 return;
4318 }
4319 break;
4320 }
4321
4322 i += sizeof(*tlv) + tlv_len;
4323 }
4324 }
4325
4326 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4327 struct sk_buff *skb,
4328 struct wmi_phyerr_hdr_arg *arg)
4329 {
4330 struct wmi_phyerr_event *ev = (void *)skb->data;
4331
4332 if (skb->len < sizeof(*ev))
4333 return -EPROTO;
4334
4335 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4336 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4337 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4338 arg->buf_len = skb->len - sizeof(*ev);
4339 arg->phyerrs = ev->phyerrs;
4340
4341 return 0;
4342 }
4343
4344 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4345 struct sk_buff *skb,
4346 struct wmi_phyerr_hdr_arg *arg)
4347 {
4348 struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4349
4350 if (skb->len < sizeof(*ev))
4351 return -EPROTO;
4352
4353 /* 10.4 firmware always reports only one phyerr */
4354 arg->num_phyerrs = 1;
4355
4356 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4357 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4358 arg->buf_len = skb->len;
4359 arg->phyerrs = skb->data;
4360
4361 return 0;
4362 }
4363
4364 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4365 const void *phyerr_buf,
4366 int left_len,
4367 struct wmi_phyerr_ev_arg *arg)
4368 {
4369 const struct wmi_phyerr *phyerr = phyerr_buf;
4370 int i;
4371
4372 if (left_len < sizeof(*phyerr)) {
4373 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4374 left_len, sizeof(*phyerr));
4375 return -EINVAL;
4376 }
4377
4378 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4379 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4380 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4381 arg->rssi_combined = phyerr->rssi_combined;
4382 arg->chan_width_mhz = phyerr->chan_width_mhz;
4383 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4384 arg->buf = phyerr->buf;
4385 arg->hdr_len = sizeof(*phyerr);
4386
4387 for (i = 0; i < 4; i++)
4388 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4389
4390 switch (phyerr->phy_err_code) {
4391 case PHY_ERROR_GEN_SPECTRAL_SCAN:
4392 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4393 break;
4394 case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4395 arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4396 break;
4397 case PHY_ERROR_GEN_RADAR:
4398 arg->phy_err_code = PHY_ERROR_RADAR;
4399 break;
4400 default:
4401 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4402 break;
4403 }
4404
4405 return 0;
4406 }
4407
4408 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4409 const void *phyerr_buf,
4410 int left_len,
4411 struct wmi_phyerr_ev_arg *arg)
4412 {
4413 const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4414 u32 phy_err_mask;
4415 int i;
4416
4417 if (left_len < sizeof(*phyerr)) {
4418 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4419 left_len, sizeof(*phyerr));
4420 return -EINVAL;
4421 }
4422
4423 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4424 arg->freq1 = __le16_to_cpu(phyerr->freq1);
4425 arg->freq2 = __le16_to_cpu(phyerr->freq2);
4426 arg->rssi_combined = phyerr->rssi_combined;
4427 arg->chan_width_mhz = phyerr->chan_width_mhz;
4428 arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4429 arg->buf = phyerr->buf;
4430 arg->hdr_len = sizeof(*phyerr);
4431
4432 for (i = 0; i < 4; i++)
4433 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4434
4435 phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4436
4437 if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4438 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4439 else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4440 arg->phy_err_code = PHY_ERROR_RADAR;
4441 else
4442 arg->phy_err_code = PHY_ERROR_UNKNOWN;
4443
4444 return 0;
4445 }
4446
4447 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4448 {
4449 struct wmi_phyerr_hdr_arg hdr_arg = {};
4450 struct wmi_phyerr_ev_arg phyerr_arg = {};
4451 const void *phyerr;
4452 u32 count, i, buf_len, phy_err_code;
4453 u64 tsf;
4454 int left_len, ret;
4455
4456 ATH10K_DFS_STAT_INC(ar, phy_errors);
4457
4458 ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4459 if (ret) {
4460 ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4461 return;
4462 }
4463
4464 /* Check number of included events */
4465 count = hdr_arg.num_phyerrs;
4466
4467 left_len = hdr_arg.buf_len;
4468
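/* Assemble the 64-bit TSF from the two 32-bit halves reported in the
 * event header; individual phyerr entries later override only the low
 * 32 bits (see ath10k_dfs_radar_report()).
 */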
4469 tsf = hdr_arg.tsf_u32;
4470 tsf <<= 32;
4471 tsf |= hdr_arg.tsf_l32;
4472
4473 ath10k_dbg(ar, ATH10K_DBG_WMI,
4474 "wmi event phyerr count %d tsf64 0x%llX\n",
4475 count, tsf);
4476
4477 phyerr = hdr_arg.phyerrs;
4478 for (i = 0; i < count; i++) {
4479 ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4480 if (ret) {
4481 ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4482 i);
4483 return;
4484 }
4485
4486 left_len -= phyerr_arg.hdr_len;
4487 buf_len = phyerr_arg.buf_len;
4488 phy_err_code = phyerr_arg.phy_err_code;
4489
4490 if (left_len < buf_len) {
4491 ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4492 return;
4493 }
4494
4495 left_len -= buf_len;
4496
4497 switch (phy_err_code) {
4498 case PHY_ERROR_RADAR:
4499 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4500 break;
4501 case PHY_ERROR_SPECTRAL_SCAN:
4502 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4503 break;
4504 case PHY_ERROR_FALSE_RADAR_EXT:
4505 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4506 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4507 break;
4508 default:
4509 break;
4510 }
4511
4512 phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4513 }
4514 }
4515
4516 static int
4517 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4518 struct wmi_dfs_status_ev_arg *arg)
4519 {
4520 struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4521
4522 if (skb->len < sizeof(*ev))
4523 return -EPROTO;
4524
4525 arg->status = ev->status;
4526
4527 return 0;
4528 }
4529
4530 static void
4531 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4532 {
4533 struct wmi_dfs_status_ev_arg status_arg = {};
4534 int ret;
4535
4536 ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4537
4538 if (ret) {
4539 ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4540 return;
4541 }
4542
4543 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4544 "dfs status event received from fw: %d\n",
4545 status_arg.status);
4546
4547 /* Even in case of radar detection failure we follow the same
4548 * behaviour as if radar were detected, i.e. switch to a different
4549 * channel.
4550 */
4551 if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4552 status_arg.status == WMI_RADAR_DETECTION_FAIL)
4553 ath10k_radar_detected(ar);
4554 complete(&ar->wmi.radar_confirm);
4555 }
4556
4557 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4558 {
4559 struct wmi_roam_ev_arg arg = {};
4560 int ret;
4561 u32 vdev_id;
4562 u32 reason;
4563 s32 rssi;
4564
4565 ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4566 if (ret) {
4567 ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4568 return;
4569 }
4570
4571 vdev_id = __le32_to_cpu(arg.vdev_id);
4572 reason = __le32_to_cpu(arg.reason);
4573 rssi = __le32_to_cpu(arg.rssi);
4574 rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4575
4576 ath10k_dbg(ar, ATH10K_DBG_WMI,
4577 "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4578 vdev_id, reason, rssi);
4579
4580 if (reason >= WMI_ROAM_REASON_MAX)
4581 ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4582 reason, vdev_id);
4583
4584 switch (reason) {
4585 case WMI_ROAM_REASON_BEACON_MISS:
4586 ath10k_mac_handle_beacon_miss(ar, vdev_id);
4587 break;
4588 case WMI_ROAM_REASON_BETTER_AP:
4589 case WMI_ROAM_REASON_LOW_RSSI:
4590 case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4591 case WMI_ROAM_REASON_HO_FAILED:
4592 ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4593 reason, vdev_id);
4594 break;
4595 }
4596 }
4597
4598 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4599 {
4600 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4601 }
4602
4603 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4604 {
4605 char buf[101], c;
4606 int i;
4607
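/* Copy at most 100 characters of the firmware debug string, stopping at
 * the NUL terminator or the end of the skb and replacing any
 * non-printable bytes with '.' so the log stays readable.
 */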
4608 for (i = 0; i < sizeof(buf) - 1; i++) {
4609 if (i >= skb->len)
4610 break;
4611
4612 c = skb->data[i];
4613
4614 if (c == '\0')
4615 break;
4616
4617 if (isascii(c) && isprint(c))
4618 buf[i] = c;
4619 else
4620 buf[i] = '.';
4621 }
4622
4623 if (i == sizeof(buf) - 1)
4624 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4625
4626 /* for some reason the debug prints end with \n, remove that */
4627 if (skb->data[i - 1] == '\n')
4628 i--;
4629
4630 /* the last byte is always reserved for the null character */
4631 buf[i] = '\0';
4632
4633 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4634 }
4635
4636 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4637 {
4638 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4639 }
4640
4641 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4642 {
4643 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4644 }
4645
4646 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4647 struct sk_buff *skb)
4648 {
4649 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4650 }
4651
4652 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4653 struct sk_buff *skb)
4654 {
4655 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4656 }
4657
4658 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4659 {
4660 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4661 }
4662
4663 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4664 {
4665 struct wmi_wow_ev_arg ev = {};
4666 int ret;
4667
4668 complete(&ar->wow.wakeup_completed);
4669
4670 ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4671 if (ret) {
4672 ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4673 return;
4674 }
4675
4676 ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4677 wow_reason(ev.wake_reason));
4678 }
4679
4680 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4681 {
4682 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4683 }
4684
4685 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4686 struct wmi_pdev_tpc_config_event *ev,
4687 u32 rate_idx, u32 num_chains,
4688 u32 rate_code, u8 type)
4689 {
4690 u8 tpc, num_streams, preamble, ch, stm_idx;
4691
4692 num_streams = ATH10K_HW_NSS(rate_code);
4693 preamble = ATH10K_HW_PREAMBLE(rate_code);
4694 ch = num_chains - 1;
4695
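/* Start from the per-rate target power, capped by the regulatory limit
 * for this chain count. For multi-chain, non-CCK rates using fewer
 * streams than chains, the value is further capped by the matching
 * STBC/TXBF/CDD table entry below.
 */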
4696 tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4697
4698 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4699 goto out;
4700
4701 if (preamble == WMI_RATE_PREAMBLE_CCK)
4702 goto out;
4703
4704 stm_idx = num_streams - 1;
4705 if (num_chains <= num_streams)
4706 goto out;
4707
4708 switch (type) {
4709 case WMI_TPC_TABLE_TYPE_STBC:
4710 tpc = min_t(u8, tpc,
4711 ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4712 break;
4713 case WMI_TPC_TABLE_TYPE_TXBF:
4714 tpc = min_t(u8, tpc,
4715 ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4716 break;
4717 case WMI_TPC_TABLE_TYPE_CDD:
4718 tpc = min_t(u8, tpc,
4719 ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4720 break;
4721 default:
4722 ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4723 tpc = 0;
4724 break;
4725 }
4726
4727 out:
4728 return tpc;
4729 }
4730
4731 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4732 struct wmi_pdev_tpc_config_event *ev,
4733 struct ath10k_tpc_stats *tpc_stats,
4734 u8 *rate_code, u16 *pream_table, u8 type)
4735 {
4736 u32 i, j, pream_idx, flags;
4737 u8 tpc[WMI_TPC_TX_N_CHAIN];
4738 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4739 char buff[WMI_TPC_BUF_SIZE];
4740
4741 flags = __le32_to_cpu(ev->flags);
4742
4743 switch (type) {
4744 case WMI_TPC_TABLE_TYPE_CDD:
4745 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4746 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4747 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4748 return;
4749 }
4750 break;
4751 case WMI_TPC_TABLE_TYPE_STBC:
4752 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4753 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4754 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4755 return;
4756 }
4757 break;
4758 case WMI_TPC_TABLE_TYPE_TXBF:
4759 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4760 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4761 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4762 return;
4763 }
4764 break;
4765 default:
4766 ath10k_dbg(ar, ATH10K_DBG_WMI,
4767 "invalid table type in wmi tpc event: %d\n", type);
4768 return;
4769 }
4770
4771 pream_idx = 0;
4772 for (i = 0; i < tpc_stats->rate_max; i++) {
4773 memset(tpc_value, 0, sizeof(tpc_value));
4774 memset(buff, 0, sizeof(buff));
4775 if (i == pream_table[pream_idx])
4776 pream_idx++;
4777
4778 for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4779 tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4780 rate_code[i],
4781 type);
4782 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4783 strlcat(tpc_value, buff, sizeof(tpc_value));
4784 }
4785 tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4786 tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4787 memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4788 tpc_value, sizeof(tpc_value));
4789 }
4790 }
4791
4792 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4793 u32 num_tx_chain)
4794 {
4795 u32 i, j, pream_idx;
4796 u8 rate_idx;
4797
4798 /* Create the rate code table based on the chains supported */
4799 rate_idx = 0;
4800 pream_idx = 0;
4801
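/* The resulting table is laid out as: 4 CCK entries, 8 OFDM entries,
 * then 8 HT20 and 8 HT40 entries per chain, 10 entries each for
 * VHT20/VHT40/VHT80 per chain, and a handful of trailing entries.
 * pream_table[] records where each preamble group ends and is
 * terminated with ATH10K_TPC_PREAM_TABLE_END.
 */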
4802 /* Fill CCK rate code */
4803 for (i = 0; i < 4; i++) {
4804 rate_code[rate_idx] =
4805 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4806 rate_idx++;
4807 }
4808 pream_table[pream_idx] = rate_idx;
4809 pream_idx++;
4810
4811 /* Fill OFDM rate code */
4812 for (i = 0; i < 8; i++) {
4813 rate_code[rate_idx] =
4814 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4815 rate_idx++;
4816 }
4817 pream_table[pream_idx] = rate_idx;
4818 pream_idx++;
4819
4820 /* Fill HT20 rate code */
4821 for (i = 0; i < num_tx_chain; i++) {
4822 for (j = 0; j < 8; j++) {
4823 rate_code[rate_idx] =
4824 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4825 rate_idx++;
4826 }
4827 }
4828 pream_table[pream_idx] = rate_idx;
4829 pream_idx++;
4830
4831 /* Fill HT40 rate code */
4832 for (i = 0; i < num_tx_chain; i++) {
4833 for (j = 0; j < 8; j++) {
4834 rate_code[rate_idx] =
4835 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4836 rate_idx++;
4837 }
4838 }
4839 pream_table[pream_idx] = rate_idx;
4840 pream_idx++;
4841
4842 /* Fill VHT20 rate code */
4843 for (i = 0; i < num_tx_chain; i++) {
4844 for (j = 0; j < 10; j++) {
4845 rate_code[rate_idx] =
4846 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4847 rate_idx++;
4848 }
4849 }
4850 pream_table[pream_idx] = rate_idx;
4851 pream_idx++;
4852
4853 /* Fill VHT40 rate code */
4854 for (i = 0; i < num_tx_chain; i++) {
4855 for (j = 0; j < 10; j++) {
4856 rate_code[rate_idx] =
4857 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4858 rate_idx++;
4859 }
4860 }
4861 pream_table[pream_idx] = rate_idx;
4862 pream_idx++;
4863
4864 /* Fill VHT80 rate code */
4865 for (i = 0; i < num_tx_chain; i++) {
4866 for (j = 0; j < 10; j++) {
4867 rate_code[rate_idx] =
4868 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4869 rate_idx++;
4870 }
4871 }
4872 pream_table[pream_idx] = rate_idx;
4873 pream_idx++;
4874
4875 rate_code[rate_idx++] =
4876 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4877 rate_code[rate_idx++] =
4878 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4879 rate_code[rate_idx++] =
4880 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4881 rate_code[rate_idx++] =
4882 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4883 rate_code[rate_idx++] =
4884 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4885
4886 pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4887 }
4888
4889 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4890 {
4891 u32 num_tx_chain, rate_max;
4892 u8 rate_code[WMI_TPC_RATE_MAX];
4893 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4894 struct wmi_pdev_tpc_config_event *ev;
4895 struct ath10k_tpc_stats *tpc_stats;
4896
4897 ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4898
4899 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4900
4901 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4902 ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
4903 num_tx_chain, WMI_TPC_TX_N_CHAIN);
4904 return;
4905 }
4906
4907 rate_max = __le32_to_cpu(ev->rate_max);
4908 if (rate_max > WMI_TPC_RATE_MAX) {
4909 ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
4910 rate_max, WMI_TPC_RATE_MAX);
4911 rate_max = WMI_TPC_RATE_MAX;
4912 }
4913
4914 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4915 if (!tpc_stats)
4916 return;
4917
4918 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4919 num_tx_chain);
4920
4921 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4922 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4923 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4924 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4925 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4926 tpc_stats->twice_antenna_reduction =
4927 __le32_to_cpu(ev->twice_antenna_reduction);
4928 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4929 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4930 tpc_stats->num_tx_chain = num_tx_chain;
4931 tpc_stats->rate_max = rate_max;
4932
4933 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4934 rate_code, pream_table,
4935 WMI_TPC_TABLE_TYPE_CDD);
4936 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4937 rate_code, pream_table,
4938 WMI_TPC_TABLE_TYPE_STBC);
4939 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4940 rate_code, pream_table,
4941 WMI_TPC_TABLE_TYPE_TXBF);
4942
4943 ath10k_debug_tpc_stats_process(ar, tpc_stats);
4944
4945 ath10k_dbg(ar, ATH10K_DBG_WMI,
4946 "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
4947 __le32_to_cpu(ev->chan_freq),
4948 __le32_to_cpu(ev->phy_mode),
4949 __le32_to_cpu(ev->ctl),
4950 __le32_to_cpu(ev->reg_domain),
4951 a_sle32_to_cpu(ev->twice_antenna_gain),
4952 __le32_to_cpu(ev->twice_antenna_reduction),
4953 __le32_to_cpu(ev->power_limit),
4954 __le32_to_cpu(ev->twice_max_rd_power) / 2,
4955 __le32_to_cpu(ev->num_tx_chain),
4956 __le32_to_cpu(ev->rate_max));
4957 }
4958
4959 static u8
4960 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4961 struct wmi_pdev_tpc_final_table_event *ev,
4962 u32 rate_idx, u32 num_chains,
4963 u32 rate_code, u8 type, u32 pream_idx)
4964 {
4965 u8 tpc, num_streams, preamble, ch, stm_idx;
4966 s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4967 int pream;
4968
4969 num_streams = ATH10K_HW_NSS(rate_code);
4970 preamble = ATH10K_HW_PREAMBLE(rate_code);
4971 ch = num_chains - 1;
4972 stm_idx = num_streams - 1;
4973 pream = -1;
4974
4975 if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4976 switch (pream_idx) {
4977 case WMI_TPC_PREAM_2GHZ_CCK:
4978 pream = 0;
4979 break;
4980 case WMI_TPC_PREAM_2GHZ_OFDM:
4981 pream = 1;
4982 break;
4983 case WMI_TPC_PREAM_2GHZ_HT20:
4984 case WMI_TPC_PREAM_2GHZ_VHT20:
4985 pream = 2;
4986 break;
4987 case WMI_TPC_PREAM_2GHZ_HT40:
4988 case WMI_TPC_PREAM_2GHZ_VHT40:
4989 pream = 3;
4990 break;
4991 case WMI_TPC_PREAM_2GHZ_VHT80:
4992 pream = 4;
4993 break;
4994 default:
4995 pream = -1;
4996 break;
4997 }
4998 }
4999
5000 if (__le32_to_cpu(ev->chan_freq) >= 5180) {
5001 switch (pream_idx) {
5002 case WMI_TPC_PREAM_5GHZ_OFDM:
5003 pream = 0;
5004 break;
5005 case WMI_TPC_PREAM_5GHZ_HT20:
5006 case WMI_TPC_PREAM_5GHZ_VHT20:
5007 pream = 1;
5008 break;
5009 case WMI_TPC_PREAM_5GHZ_HT40:
5010 case WMI_TPC_PREAM_5GHZ_VHT40:
5011 pream = 2;
5012 break;
5013 case WMI_TPC_PREAM_5GHZ_VHT80:
5014 pream = 3;
5015 break;
5016 case WMI_TPC_PREAM_5GHZ_HTCUP:
5017 pream = 4;
5018 break;
5019 default:
5020 pream = -1;
5021 break;
5022 }
5023 }
5024
5025 if (pream == -1) {
5026 ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
5027 pream_idx, __le32_to_cpu(ev->chan_freq));
5028 tpc = 0;
5029 goto out;
5030 }
5031
5032 if (pream == 4)
5033 tpc = min_t(u8, ev->rates_array[rate_idx],
5034 ev->max_reg_allow_pow[ch]);
5035 else
5036 tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
5037 ev->max_reg_allow_pow[ch]),
5038 ev->ctl_power_table[0][pream][stm_idx]);
5039
5040 if (__le32_to_cpu(ev->num_tx_chain) <= 1)
5041 goto out;
5042
5043 if (preamble == WMI_RATE_PREAMBLE_CCK)
5044 goto out;
5045
5046 if (num_chains <= num_streams)
5047 goto out;
5048
5049 switch (type) {
5050 case WMI_TPC_TABLE_TYPE_STBC:
5051 pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
5052 if (pream == 4)
5053 tpc = min_t(u8, tpc, pow_agstbc);
5054 else
5055 tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
5056 ev->ctl_power_table[0][pream][stm_idx]);
5057 break;
5058 case WMI_TPC_TABLE_TYPE_TXBF:
5059 pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
5060 if (pream == 4)
5061 tpc = min_t(u8, tpc, pow_agtxbf);
5062 else
5063 tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
5064 ev->ctl_power_table[1][pream][stm_idx]);
5065 break;
5066 case WMI_TPC_TABLE_TYPE_CDD:
5067 pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
5068 if (pream == 4)
5069 tpc = min_t(u8, tpc, pow_agcdd);
5070 else
5071 tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
5072 ev->ctl_power_table[0][pream][stm_idx]);
5073 break;
5074 default:
5075 ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5076 tpc = 0;
5077 break;
5078 }
5079
5080 out:
5081 return tpc;
5082 }
5083
5084 static void
5085 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5086 struct wmi_pdev_tpc_final_table_event *ev,
5087 struct ath10k_tpc_stats_final *tpc_stats,
5088 u8 *rate_code, u16 *pream_table, u8 type)
5089 {
5090 u32 i, j, pream_idx, flags;
5091 u8 tpc[WMI_TPC_TX_N_CHAIN];
5092 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5093 char buff[WMI_TPC_BUF_SIZE];
5094
5095 flags = __le32_to_cpu(ev->flags);
5096
5097 switch (type) {
5098 case WMI_TPC_TABLE_TYPE_CDD:
5099 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5100 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5101 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5102 return;
5103 }
5104 break;
5105 case WMI_TPC_TABLE_TYPE_STBC:
5106 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5107 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5108 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5109 return;
5110 }
5111 break;
5112 case WMI_TPC_TABLE_TYPE_TXBF:
5113 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5114 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5115 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5116 return;
5117 }
5118 break;
5119 default:
5120 ath10k_dbg(ar, ATH10K_DBG_WMI,
5121 "invalid table type in wmi tpc event: %d\n", type);
5122 return;
5123 }
5124
5125 pream_idx = 0;
5126 for (i = 0; i < tpc_stats->rate_max; i++) {
5127 memset(tpc_value, 0, sizeof(tpc_value));
5128 memset(buff, 0, sizeof(buff));
5129 if (i == pream_table[pream_idx])
5130 pream_idx++;
5131
5132 for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5133 tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5134 rate_code[i],
5135 type, pream_idx);
5136 snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5137 strlcat(tpc_value, buff, sizeof(tpc_value));
5138 }
5139 tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5140 tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5141 memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5142 tpc_value, sizeof(tpc_value));
5143 }
5144 }
5145
5146 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5147 {
5148 u32 num_tx_chain, rate_max;
5149 u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5150 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5151 struct wmi_pdev_tpc_final_table_event *ev;
5152 struct ath10k_tpc_stats_final *tpc_stats;
5153
5154 ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5155
5156 num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5157 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5158 ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
5159 num_tx_chain, WMI_TPC_TX_N_CHAIN);
5160 return;
5161 }
5162
5163 rate_max = __le32_to_cpu(ev->rate_max);
5164 if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5165 ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
5166 rate_max, WMI_TPC_FINAL_RATE_MAX);
5167 rate_max = WMI_TPC_FINAL_RATE_MAX;
5168 }
5169
5170 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5171 if (!tpc_stats)
5172 return;
5173
5174 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5175 num_tx_chain);
5176
5177 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5178 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5179 tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5180 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5181 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5182 tpc_stats->twice_antenna_reduction =
5183 __le32_to_cpu(ev->twice_antenna_reduction);
5184 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5185 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5186 tpc_stats->num_tx_chain = num_tx_chain;
5187 tpc_stats->rate_max = rate_max;
5188
5189 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5190 rate_code, pream_table,
5191 WMI_TPC_TABLE_TYPE_CDD);
5192 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5193 rate_code, pream_table,
5194 WMI_TPC_TABLE_TYPE_STBC);
5195 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5196 rate_code, pream_table,
5197 WMI_TPC_TABLE_TYPE_TXBF);
5198
5199 ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5200
5201 ath10k_dbg(ar, ATH10K_DBG_WMI,
5202 "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
5203 __le32_to_cpu(ev->chan_freq),
5204 __le32_to_cpu(ev->phy_mode),
5205 __le32_to_cpu(ev->ctl),
5206 __le32_to_cpu(ev->reg_domain),
5207 a_sle32_to_cpu(ev->twice_antenna_gain),
5208 __le32_to_cpu(ev->twice_antenna_reduction),
5209 __le32_to_cpu(ev->power_limit),
5210 __le32_to_cpu(ev->twice_max_rd_power) / 2,
5211 __le32_to_cpu(ev->num_tx_chain),
5212 __le32_to_cpu(ev->rate_max));
5213 }
5214
5215 static void
5216 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5217 {
5218 struct wmi_tdls_peer_event *ev;
5219 struct ath10k_peer *peer;
5220 struct ath10k_vif *arvif;
5221 int vdev_id;
5222 int peer_status;
5223 int peer_reason;
5224 u8 reason;
5225
5226 if (skb->len < sizeof(*ev)) {
5227 ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5228 skb->len);
5229 return;
5230 }
5231
5232 ev = (struct wmi_tdls_peer_event *)skb->data;
5233 vdev_id = __le32_to_cpu(ev->vdev_id);
5234 peer_status = __le32_to_cpu(ev->peer_status);
5235 peer_reason = __le32_to_cpu(ev->peer_reason);
5236
5237 spin_lock_bh(&ar->data_lock);
5238 peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5239 spin_unlock_bh(&ar->data_lock);
5240
5241 if (!peer) {
5242 ath10k_warn(ar, "failed to find peer entry for %pM\n",
5243 ev->peer_macaddr.addr);
5244 return;
5245 }
5246
5247 switch (peer_status) {
5248 case WMI_TDLS_SHOULD_TEARDOWN:
5249 switch (peer_reason) {
5250 case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5251 case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5252 case WMI_TDLS_TEARDOWN_REASON_RSSI:
5253 reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5254 break;
5255 default:
5256 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5257 break;
5258 }
5259
5260 arvif = ath10k_get_arvif(ar, vdev_id);
5261 if (!arvif) {
5262 ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5263 vdev_id);
5264 return;
5265 }
5266
5267 ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5268 NL80211_TDLS_TEARDOWN, reason,
5269 GFP_ATOMIC);
5270
5271 ath10k_dbg(ar, ATH10K_DBG_WMI,
5272 "received tdls teardown event for peer %pM reason %u\n",
5273 ev->peer_macaddr.addr, peer_reason);
5274 break;
5275 default:
5276 ath10k_dbg(ar, ATH10K_DBG_WMI,
5277 "received unknown tdls peer event %u\n",
5278 peer_status);
5279 break;
5280 }
5281 }
5282
5283 static void
5284 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5285 {
5286 struct wmi_peer_sta_ps_state_chg_event *ev;
5287 struct ieee80211_sta *sta;
5288 struct ath10k_sta *arsta;
5289 u8 peer_addr[ETH_ALEN];
5290
5291 lockdep_assert_held(&ar->data_lock);
5292
5293 ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5294 ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5295
5296 rcu_read_lock();
5297
5298 sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5299
5300 if (!sta) {
5301 ath10k_warn(ar, "failed to find station entry %pM\n",
5302 peer_addr);
5303 goto exit;
5304 }
5305
5306 arsta = (struct ath10k_sta *)sta->drv_priv;
5307 arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5308
5309 exit:
5310 rcu_read_unlock();
5311 }
5312
5313 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5314 {
5315 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5316 }
5317
5318 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5319 {
5320 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5321 }
5322
5323 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5324 {
5325 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5326 }
5327
5328 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5329 {
5330 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5331 }
5332
5333 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5334 {
5335 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5336 }
5337
5338 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5339 struct sk_buff *skb)
5340 {
5341 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5342 }
5343
5344 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5345 {
5346 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5347 }
5348
5349 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5350 {
5351 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5352 }
5353
5354 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5355 {
5356 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5357 }
5358
5359 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5360 u32 num_units, u32 unit_len)
5361 {
5362 dma_addr_t paddr;
5363 u32 pool_size;
5364 int idx = ar->wmi.num_mem_chunks;
5365 void *vaddr;
5366
5367 pool_size = num_units * round_up(unit_len, 4);
5368 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5369
5370 if (!vaddr)
5371 return -ENOMEM;
5372
5373 ar->wmi.mem_chunks[idx].vaddr = vaddr;
5374 ar->wmi.mem_chunks[idx].paddr = paddr;
5375 ar->wmi.mem_chunks[idx].len = pool_size;
5376 ar->wmi.mem_chunks[idx].req_id = req_id;
5377 ar->wmi.num_mem_chunks++;
5378
5379 return num_units;
5380 }
5381
5382 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5383 u32 num_units, u32 unit_len)
5384 {
5385 int ret;
5386
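/* ath10k_wmi_alloc_chunk() returns how many units it actually placed in
 * the newly allocated chunk (or a negative error), so keep requesting
 * the remainder until the full request is covered.
 */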
5387 while (num_units) {
5388 ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5389 if (ret < 0)
5390 return ret;
5391
5392 num_units -= ret;
5393 }
5394
5395 return 0;
5396 }
5397
5398 static bool
5399 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5400 const struct wlan_host_mem_req **mem_reqs,
5401 u32 num_mem_reqs)
5402 {
5403 u32 req_id, num_units, unit_size, num_unit_info;
5404 u32 pool_size;
5405 int i, j;
5406 bool found;
5407
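/* Compare the firmware's current memory requests against the chunks
 * allocated for the previous service ready event: every request must be
 * matched by an existing chunk with the same req_id and the same
 * recomputed pool size, otherwise a fresh allocation is needed.
 */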
5408 if (ar->wmi.num_mem_chunks != num_mem_reqs)
5409 return false;
5410
5411 for (i = 0; i < num_mem_reqs; ++i) {
5412 req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5413 num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5414 unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5415 num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5416
5417 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5418 if (ar->num_active_peers)
5419 num_units = ar->num_active_peers + 1;
5420 else
5421 num_units = ar->max_num_peers + 1;
5422 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5423 num_units = ar->max_num_peers + 1;
5424 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5425 num_units = ar->max_num_vdevs + 1;
5426 }
5427
5428 found = false;
5429 for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5430 if (ar->wmi.mem_chunks[j].req_id == req_id) {
5431 pool_size = num_units * round_up(unit_size, 4);
5432 if (ar->wmi.mem_chunks[j].len == pool_size) {
5433 found = true;
5434 break;
5435 }
5436 }
5437 }
5438 if (!found)
5439 return false;
5440 }
5441
5442 return true;
5443 }
5444
5445 static int
5446 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5447 struct wmi_svc_rdy_ev_arg *arg)
5448 {
5449 struct wmi_service_ready_event *ev;
5450 size_t i, n;
5451
5452 if (skb->len < sizeof(*ev))
5453 return -EPROTO;
5454
5455 ev = (void *)skb->data;
5456 skb_pull(skb, sizeof(*ev));
5457 arg->min_tx_power = ev->hw_min_tx_power;
5458 arg->max_tx_power = ev->hw_max_tx_power;
5459 arg->ht_cap = ev->ht_cap_info;
5460 arg->vht_cap = ev->vht_cap_info;
5461 arg->vht_supp_mcs = ev->vht_supp_mcs;
5462 arg->sw_ver0 = ev->sw_version;
5463 arg->sw_ver1 = ev->sw_version_1;
5464 arg->phy_capab = ev->phy_capability;
5465 arg->num_rf_chains = ev->num_rf_chains;
5466 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5467 arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5468 arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5469 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5470 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5471 arg->num_mem_reqs = ev->num_mem_reqs;
5472 arg->service_map = ev->wmi_service_bitmap;
5473 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5474
5475 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5476 ARRAY_SIZE(arg->mem_reqs));
5477 for (i = 0; i < n; i++)
5478 arg->mem_reqs[i] = &ev->mem_reqs[i];
5479
5480 if (skb->len <
5481 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5482 return -EPROTO;
5483
5484 return 0;
5485 }
5486
5487 static int
5488 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5489 struct wmi_svc_rdy_ev_arg *arg)
5490 {
5491 struct wmi_10x_service_ready_event *ev;
5492 int i, n;
5493
5494 if (skb->len < sizeof(*ev))
5495 return -EPROTO;
5496
5497 ev = (void *)skb->data;
5498 skb_pull(skb, sizeof(*ev));
5499 arg->min_tx_power = ev->hw_min_tx_power;
5500 arg->max_tx_power = ev->hw_max_tx_power;
5501 arg->ht_cap = ev->ht_cap_info;
5502 arg->vht_cap = ev->vht_cap_info;
5503 arg->vht_supp_mcs = ev->vht_supp_mcs;
5504 arg->sw_ver0 = ev->sw_version;
5505 arg->phy_capab = ev->phy_capability;
5506 arg->num_rf_chains = ev->num_rf_chains;
5507 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5508 arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5509 arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5510 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5511 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5512 arg->num_mem_reqs = ev->num_mem_reqs;
5513 arg->service_map = ev->wmi_service_bitmap;
5514 arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5515
5516 /* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
5517 * different values. We would need a translation to handle that,
5518 * but as we don't currently need anything from sys_cap_info from
5519 * the WMI interface (only from WMI-TLV), it is safest to skip it.
5520 */
5521
5522 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5523 ARRAY_SIZE(arg->mem_reqs));
5524 for (i = 0; i < n; i++)
5525 arg->mem_reqs[i] = &ev->mem_reqs[i];
5526
5527 if (skb->len <
5528 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5529 return -EPROTO;
5530
5531 return 0;
5532 }
5533
5534 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5535 {
5536 struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5537 struct sk_buff *skb = ar->svc_rdy_skb;
5538 struct wmi_svc_rdy_ev_arg arg = {};
5539 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5540 int ret;
5541 bool allocated;
5542
5543 if (!skb) {
5544 ath10k_warn(ar, "invalid service ready event skb\n");
5545 return;
5546 }
5547
5548 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5549 if (ret) {
5550 ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5551 return;
5552 }
5553
5554 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5555 arg.service_map_len);
5556
5557 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5558 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5559 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5560 ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5561 ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
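/* sw_ver0 packs the major version in its top 8 bits and the minor
 * version in the remaining 24 bits; sw_ver1 packs release and build in
 * its upper and lower 16 bits respectively.
 */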
5562 ar->fw_version_major =
5563 (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5564 ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5565 ar->fw_version_release =
5566 (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5567 ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5568 ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5569 ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5570 ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5571 ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
5572 ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
5573 ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5574 ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5575 ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
5576
5577 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5578 arg.service_map, arg.service_map_len);
5579 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
5580 ar->sys_cap_info);
5581
5582 if (ar->num_rf_chains > ar->max_spatial_stream) {
5583 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5584 ar->num_rf_chains, ar->max_spatial_stream);
5585 ar->num_rf_chains = ar->max_spatial_stream;
5586 }
5587
5588 if (!ar->cfg_tx_chainmask) {
5589 ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5590 ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5591 }
5592
5593 if (strlen(ar->hw->wiphy->fw_version) == 0) {
5594 snprintf(ar->hw->wiphy->fw_version,
5595 sizeof(ar->hw->wiphy->fw_version),
5596 "%u.%u.%u.%u",
5597 ar->fw_version_major,
5598 ar->fw_version_minor,
5599 ar->fw_version_release,
5600 ar->fw_version_build);
5601 }
5602
5603 num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5604 if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5605 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5606 num_mem_reqs);
5607 return;
5608 }
5609
5610 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5611 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5612 ar->running_fw->fw_file.fw_features))
5613 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5614 ar->max_num_vdevs;
5615 else
5616 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5617 ar->max_num_vdevs;
5618
5619 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5620 ar->max_num_vdevs;
5621 ar->num_tids = ar->num_active_peers * 2;
5622 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5623 }
5624
5625 /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5626 * and WMI_SERVICE_IRAM_TIDS, etc.
5627 */
5628
5629 allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5630 num_mem_reqs);
5631 if (allocated)
5632 goto skip_mem_alloc;
5633
5634 /* Either this event is received during boot time or there is a change
5635 * in memory requirement from firmware when compared to last request.
5636 * Free any old memory and do a fresh allocation based on the current
5637 * memory requirement.
5638 */
5639 ath10k_wmi_free_host_mem(ar);
5640
5641 for (i = 0; i < num_mem_reqs; ++i) {
5642 req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5643 num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5644 unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5645 num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5646
5647 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5648 if (ar->num_active_peers)
5649 num_units = ar->num_active_peers + 1;
5650 else
5651 num_units = ar->max_num_peers + 1;
5652 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5653 /* number of units to allocate is the number of
5654 * peers, plus 1 extra for the self peer on the target.
5655 * This needs to stay tied together, otherwise host and
5656 * target can get out of sync.
5657 */
5658 num_units = ar->max_num_peers + 1;
5659 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5660 num_units = ar->max_num_vdevs + 1;
5661 }
5662
5663 ath10k_dbg(ar, ATH10K_DBG_WMI,
5664 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5665 req_id,
5666 __le32_to_cpu(arg.mem_reqs[i]->num_units),
5667 num_unit_info,
5668 unit_size,
5669 num_units);
5670
5671 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5672 unit_size);
5673 if (ret)
5674 return;
5675 }
5676
5677 skip_mem_alloc:
5678 ath10k_dbg(ar, ATH10K_DBG_WMI,
5679 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
5680 __le32_to_cpu(arg.min_tx_power),
5681 __le32_to_cpu(arg.max_tx_power),
5682 __le32_to_cpu(arg.ht_cap),
5683 __le32_to_cpu(arg.vht_cap),
5684 __le32_to_cpu(arg.vht_supp_mcs),
5685 __le32_to_cpu(arg.sw_ver0),
5686 __le32_to_cpu(arg.sw_ver1),
5687 __le32_to_cpu(arg.fw_build),
5688 __le32_to_cpu(arg.phy_capab),
5689 __le32_to_cpu(arg.num_rf_chains),
5690 __le32_to_cpu(arg.eeprom_rd),
5691 __le32_to_cpu(arg.low_2ghz_chan),
5692 __le32_to_cpu(arg.high_2ghz_chan),
5693 __le32_to_cpu(arg.low_5ghz_chan),
5694 __le32_to_cpu(arg.high_5ghz_chan),
5695 __le32_to_cpu(arg.num_mem_reqs));
5696
5697 dev_kfree_skb(skb);
5698 ar->svc_rdy_skb = NULL;
5699 complete(&ar->wmi.service_ready);
5700 }
5701
5702 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5703 {
5704 ar->svc_rdy_skb = skb;
5705 queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5706 }
5707
5708 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5709 struct wmi_rdy_ev_arg *arg)
5710 {
5711 struct wmi_ready_event *ev = (void *)skb->data;
5712
5713 if (skb->len < sizeof(*ev))
5714 return -EPROTO;
5715
5716 skb_pull(skb, sizeof(*ev));
5717 arg->sw_version = ev->sw_version;
5718 arg->abi_version = ev->abi_version;
5719 arg->status = ev->status;
5720 arg->mac_addr = ev->mac_addr.addr;
5721
5722 return 0;
5723 }
5724
5725 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5726 struct wmi_roam_ev_arg *arg)
5727 {
5728 struct wmi_roam_ev *ev = (void *)skb->data;
5729
5730 if (skb->len < sizeof(*ev))
5731 return -EPROTO;
5732
5733 skb_pull(skb, sizeof(*ev));
5734 arg->vdev_id = ev->vdev_id;
5735 arg->reason = ev->reason;
5736
5737 return 0;
5738 }
5739
5740 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5741 struct sk_buff *skb,
5742 struct wmi_echo_ev_arg *arg)
5743 {
5744 struct wmi_echo_event *ev = (void *)skb->data;
5745
5746 arg->value = ev->value;
5747
5748 return 0;
5749 }
5750
5751 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5752 {
5753 struct wmi_rdy_ev_arg arg = {};
5754 int ret;
5755
5756 ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5757 if (ret) {
5758 ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5759 return ret;
5760 }
5761
5762 ath10k_dbg(ar, ATH10K_DBG_WMI,
5763 "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
5764 __le32_to_cpu(arg.sw_version),
5765 __le32_to_cpu(arg.abi_version),
5766 arg.mac_addr,
5767 __le32_to_cpu(arg.status));
5768
5769 if (is_zero_ether_addr(ar->mac_addr))
5770 ether_addr_copy(ar->mac_addr, arg.mac_addr);
5771 complete(&ar->wmi.unified_ready);
5772 return 0;
5773 }
5774
5775 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5776 {
5777 int ret;
5778 struct wmi_svc_avail_ev_arg arg = {};
5779
5780 ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5781 if (ret) {
5782 ath10k_warn(ar, "failed to parse service available event: %d\n",
5783 ret);
5784 }
5785
5786 /*
5787 * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
5788 * for the below logic to work.
5789 */
5790 if (arg.service_map_ext_valid)
5791 ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5792 __le32_to_cpu(arg.service_map_ext_len));
5793 }
5794
5795 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5796 {
5797 const struct wmi_pdev_temperature_event *ev;
5798
5799 ev = (struct wmi_pdev_temperature_event *)skb->data;
5800 if (WARN_ON(skb->len < sizeof(*ev)))
5801 return -EPROTO;
5802
5803 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5804 return 0;
5805 }
5806
5807 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5808 struct sk_buff *skb)
5809 {
5810 struct wmi_pdev_bss_chan_info_event *ev;
5811 struct survey_info *survey;
5812 u64 busy, total, tx, rx, rx_bss;
5813 u32 freq, noise_floor;
5814 u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5815 int idx;
5816
5817 ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5818 if (WARN_ON(skb->len < sizeof(*ev)))
5819 return -EPROTO;
5820
5821 freq = __le32_to_cpu(ev->freq);
5822 noise_floor = __le32_to_cpu(ev->noise_floor);
5823 busy = __le64_to_cpu(ev->cycle_busy);
5824 total = __le64_to_cpu(ev->cycle_total);
5825 tx = __le64_to_cpu(ev->cycle_tx);
5826 rx = __le64_to_cpu(ev->cycle_rx);
5827 rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
5828
5829 ath10k_dbg(ar, ATH10K_DBG_WMI,
5830 "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5831 freq, noise_floor, busy, total, tx, rx, rx_bss);
5832
5833 spin_lock_bh(&ar->data_lock);
5834 idx = freq_to_idx(ar, freq);
5835 if (idx >= ARRAY_SIZE(ar->survey)) {
5836 ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5837 freq, idx);
5838 goto exit;
5839 }
5840
5841 survey = &ar->survey[idx];
5842
5843 survey->noise = noise_floor;
5844 survey->time = div_u64(total, cc_freq_hz);
5845 survey->time_busy = div_u64(busy, cc_freq_hz);
5846 survey->time_rx = div_u64(rx_bss, cc_freq_hz);
5847 survey->time_tx = div_u64(tx, cc_freq_hz);
5848 survey->filled |= (SURVEY_INFO_NOISE_DBM |
5849 SURVEY_INFO_TIME |
5850 SURVEY_INFO_TIME_BUSY |
5851 SURVEY_INFO_TIME_RX |
5852 SURVEY_INFO_TIME_TX);
5853 exit:
5854 spin_unlock_bh(&ar->data_lock);
5855 complete(&ar->bss_survey_done);
5856 return 0;
5857 }
5858
5859 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5860 {
5861 if (ar->hw_params.hw_ops->set_coverage_class) {
5862 spin_lock_bh(&ar->data_lock);
5863
5864 /* This call only ensures that the modified coverage class
5865 * persists in case the firmware sets the registers back to
5866 * their default value. So calling it is only necessary if the
5867 * coverage class has a non-zero value.
5868 */
5869 if (ar->fw_coverage.coverage_class)
5870 queue_work(ar->workqueue, &ar->set_coverage_class_work);
5871
5872 spin_unlock_bh(&ar->data_lock);
5873 }
5874 }
5875
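/* Event dispatcher for main firmware WMI: strip the WMI command
 * header, trace the event and fan out to the per-event handlers.
 * The skb is freed here unless a handler takes ownership of it
 * (management rx and service ready return early for that reason).
 */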
5876 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5877 {
5878 struct wmi_cmd_hdr *cmd_hdr;
5879 enum wmi_event_id id;
5880
5881 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5882 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5883
5884 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5885 goto out;
5886
5887 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5888
5889 switch (id) {
5890 case WMI_MGMT_RX_EVENTID:
5891 ath10k_wmi_event_mgmt_rx(ar, skb);
5892 /* mgmt_rx() owns the skb now! */
5893 return;
5894 case WMI_SCAN_EVENTID:
5895 ath10k_wmi_event_scan(ar, skb);
5896 ath10k_wmi_queue_set_coverage_class_work(ar);
5897 break;
5898 case WMI_CHAN_INFO_EVENTID:
5899 ath10k_wmi_event_chan_info(ar, skb);
5900 break;
5901 case WMI_ECHO_EVENTID:
5902 ath10k_wmi_event_echo(ar, skb);
5903 break;
5904 case WMI_DEBUG_MESG_EVENTID:
5905 ath10k_wmi_event_debug_mesg(ar, skb);
5906 ath10k_wmi_queue_set_coverage_class_work(ar);
5907 break;
5908 case WMI_UPDATE_STATS_EVENTID:
5909 ath10k_wmi_event_update_stats(ar, skb);
5910 break;
5911 case WMI_VDEV_START_RESP_EVENTID:
5912 ath10k_wmi_event_vdev_start_resp(ar, skb);
5913 ath10k_wmi_queue_set_coverage_class_work(ar);
5914 break;
5915 case WMI_VDEV_STOPPED_EVENTID:
5916 ath10k_wmi_event_vdev_stopped(ar, skb);
5917 ath10k_wmi_queue_set_coverage_class_work(ar);
5918 break;
5919 case WMI_PEER_STA_KICKOUT_EVENTID:
5920 ath10k_wmi_event_peer_sta_kickout(ar, skb);
5921 break;
5922 case WMI_HOST_SWBA_EVENTID:
5923 ath10k_wmi_event_host_swba(ar, skb);
5924 break;
5925 case WMI_TBTTOFFSET_UPDATE_EVENTID:
5926 ath10k_wmi_event_tbttoffset_update(ar, skb);
5927 break;
5928 case WMI_PHYERR_EVENTID:
5929 ath10k_wmi_event_phyerr(ar, skb);
5930 break;
5931 case WMI_ROAM_EVENTID:
5932 ath10k_wmi_event_roam(ar, skb);
5933 ath10k_wmi_queue_set_coverage_class_work(ar);
5934 break;
5935 case WMI_PROFILE_MATCH:
5936 ath10k_wmi_event_profile_match(ar, skb);
5937 break;
5938 case WMI_DEBUG_PRINT_EVENTID:
5939 ath10k_wmi_event_debug_print(ar, skb);
5940 ath10k_wmi_queue_set_coverage_class_work(ar);
5941 break;
5942 case WMI_PDEV_QVIT_EVENTID:
5943 ath10k_wmi_event_pdev_qvit(ar, skb);
5944 break;
5945 case WMI_WLAN_PROFILE_DATA_EVENTID:
5946 ath10k_wmi_event_wlan_profile_data(ar, skb);
5947 break;
5948 case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5949 ath10k_wmi_event_rtt_measurement_report(ar, skb);
5950 break;
5951 case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5952 ath10k_wmi_event_tsf_measurement_report(ar, skb);
5953 break;
5954 case WMI_RTT_ERROR_REPORT_EVENTID:
5955 ath10k_wmi_event_rtt_error_report(ar, skb);
5956 break;
5957 case WMI_WOW_WAKEUP_HOST_EVENTID:
5958 ath10k_wmi_event_wow_wakeup_host(ar, skb);
5959 break;
5960 case WMI_DCS_INTERFERENCE_EVENTID:
5961 ath10k_wmi_event_dcs_interference(ar, skb);
5962 break;
5963 case WMI_PDEV_TPC_CONFIG_EVENTID:
5964 ath10k_wmi_event_pdev_tpc_config(ar, skb);
5965 break;
5966 case WMI_PDEV_FTM_INTG_EVENTID:
5967 ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5968 break;
5969 case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5970 ath10k_wmi_event_gtk_offload_status(ar, skb);
5971 break;
5972 case WMI_GTK_REKEY_FAIL_EVENTID:
5973 ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5974 break;
5975 case WMI_TX_DELBA_COMPLETE_EVENTID:
5976 ath10k_wmi_event_delba_complete(ar, skb);
5977 break;
5978 case WMI_TX_ADDBA_COMPLETE_EVENTID:
5979 ath10k_wmi_event_addba_complete(ar, skb);
5980 break;
5981 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5982 ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5983 break;
5984 case WMI_SERVICE_READY_EVENTID:
5985 ath10k_wmi_event_service_ready(ar, skb);
5986 return;
5987 case WMI_READY_EVENTID:
5988 ath10k_wmi_event_ready(ar, skb);
5989 ath10k_wmi_queue_set_coverage_class_work(ar);
5990 break;
5991 case WMI_SERVICE_AVAILABLE_EVENTID:
5992 ath10k_wmi_event_service_available(ar, skb);
5993 break;
5994 default:
5995 ath10k_warn(ar, "Unknown eventid: %d\n", id);
5996 break;
5997 }
5998
5999 out:
6000 dev_kfree_skb(skb);
6001 }
6002
6003 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
6004 {
6005 struct wmi_cmd_hdr *cmd_hdr;
6006 enum wmi_10x_event_id id;
6007 bool consumed;
6008
6009 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6010 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6011
6012 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6013 goto out;
6014
6015 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6016
6017 consumed = ath10k_tm_event_wmi(ar, id, skb);
6018
6019 /* The ready event must be handled normally even in UTF mode so
6020 * that we know the UTF firmware has booted; all other WMI events
6021 * are simply passed through to testmode.
6022 */
6023 if (consumed && id != WMI_10X_READY_EVENTID) {
6024 ath10k_dbg(ar, ATH10K_DBG_WMI,
6025 "wmi testmode consumed 0x%x\n", id);
6026 goto out;
6027 }
6028
6029 switch (id) {
6030 case WMI_10X_MGMT_RX_EVENTID:
6031 ath10k_wmi_event_mgmt_rx(ar, skb);
6032 /* mgmt_rx() owns the skb now! */
6033 return;
6034 case WMI_10X_SCAN_EVENTID:
6035 ath10k_wmi_event_scan(ar, skb);
6036 ath10k_wmi_queue_set_coverage_class_work(ar);
6037 break;
6038 case WMI_10X_CHAN_INFO_EVENTID:
6039 ath10k_wmi_event_chan_info(ar, skb);
6040 break;
6041 case WMI_10X_ECHO_EVENTID:
6042 ath10k_wmi_event_echo(ar, skb);
6043 break;
6044 case WMI_10X_DEBUG_MESG_EVENTID:
6045 ath10k_wmi_event_debug_mesg(ar, skb);
6046 ath10k_wmi_queue_set_coverage_class_work(ar);
6047 break;
6048 case WMI_10X_UPDATE_STATS_EVENTID:
6049 ath10k_wmi_event_update_stats(ar, skb);
6050 break;
6051 case WMI_10X_VDEV_START_RESP_EVENTID:
6052 ath10k_wmi_event_vdev_start_resp(ar, skb);
6053 ath10k_wmi_queue_set_coverage_class_work(ar);
6054 break;
6055 case WMI_10X_VDEV_STOPPED_EVENTID:
6056 ath10k_wmi_event_vdev_stopped(ar, skb);
6057 ath10k_wmi_queue_set_coverage_class_work(ar);
6058 break;
6059 case WMI_10X_PEER_STA_KICKOUT_EVENTID:
6060 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6061 break;
6062 case WMI_10X_HOST_SWBA_EVENTID:
6063 ath10k_wmi_event_host_swba(ar, skb);
6064 break;
6065 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
6066 ath10k_wmi_event_tbttoffset_update(ar, skb);
6067 break;
6068 case WMI_10X_PHYERR_EVENTID:
6069 ath10k_wmi_event_phyerr(ar, skb);
6070 break;
6071 case WMI_10X_ROAM_EVENTID:
6072 ath10k_wmi_event_roam(ar, skb);
6073 ath10k_wmi_queue_set_coverage_class_work(ar);
6074 break;
6075 case WMI_10X_PROFILE_MATCH:
6076 ath10k_wmi_event_profile_match(ar, skb);
6077 break;
6078 case WMI_10X_DEBUG_PRINT_EVENTID:
6079 ath10k_wmi_event_debug_print(ar, skb);
6080 ath10k_wmi_queue_set_coverage_class_work(ar);
6081 break;
6082 case WMI_10X_PDEV_QVIT_EVENTID:
6083 ath10k_wmi_event_pdev_qvit(ar, skb);
6084 break;
6085 case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
6086 ath10k_wmi_event_wlan_profile_data(ar, skb);
6087 break;
6088 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
6089 ath10k_wmi_event_rtt_measurement_report(ar, skb);
6090 break;
6091 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
6092 ath10k_wmi_event_tsf_measurement_report(ar, skb);
6093 break;
6094 case WMI_10X_RTT_ERROR_REPORT_EVENTID:
6095 ath10k_wmi_event_rtt_error_report(ar, skb);
6096 break;
6097 case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
6098 ath10k_wmi_event_wow_wakeup_host(ar, skb);
6099 break;
6100 case WMI_10X_DCS_INTERFERENCE_EVENTID:
6101 ath10k_wmi_event_dcs_interference(ar, skb);
6102 break;
6103 case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6104 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6105 break;
6106 case WMI_10X_INST_RSSI_STATS_EVENTID:
6107 ath10k_wmi_event_inst_rssi_stats(ar, skb);
6108 break;
6109 case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6110 ath10k_wmi_event_vdev_standby_req(ar, skb);
6111 break;
6112 case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6113 ath10k_wmi_event_vdev_resume_req(ar, skb);
6114 break;
6115 case WMI_10X_SERVICE_READY_EVENTID:
6116 ath10k_wmi_event_service_ready(ar, skb);
6117 return;
6118 case WMI_10X_READY_EVENTID:
6119 ath10k_wmi_event_ready(ar, skb);
6120 ath10k_wmi_queue_set_coverage_class_work(ar);
6121 break;
6122 case WMI_10X_PDEV_UTF_EVENTID:
6123 /* ignore utf events */
6124 break;
6125 default:
6126 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6127 break;
6128 }
6129
6130 out:
6131 dev_kfree_skb(skb);
6132 }
6133
6134 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6135 {
6136 struct wmi_cmd_hdr *cmd_hdr;
6137 enum wmi_10_2_event_id id;
6138 bool consumed;
6139
6140 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6141 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6142
6143 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6144 goto out;
6145
6146 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6147
6148 consumed = ath10k_tm_event_wmi(ar, id, skb);
6149
6150 /* The ready event must be handled normally even in UTF mode so
6151 * that we know the UTF firmware has booted; all other WMI events
6152 * are simply passed through to testmode.
6153 */
6154 if (consumed && id != WMI_10_2_READY_EVENTID) {
6155 ath10k_dbg(ar, ATH10K_DBG_WMI,
6156 "wmi testmode consumed 0x%x\n", id);
6157 goto out;
6158 }
6159
6160 switch (id) {
6161 case WMI_10_2_MGMT_RX_EVENTID:
6162 ath10k_wmi_event_mgmt_rx(ar, skb);
6163 /* mgmt_rx() owns the skb now! */
6164 return;
6165 case WMI_10_2_SCAN_EVENTID:
6166 ath10k_wmi_event_scan(ar, skb);
6167 ath10k_wmi_queue_set_coverage_class_work(ar);
6168 break;
6169 case WMI_10_2_CHAN_INFO_EVENTID:
6170 ath10k_wmi_event_chan_info(ar, skb);
6171 break;
6172 case WMI_10_2_ECHO_EVENTID:
6173 ath10k_wmi_event_echo(ar, skb);
6174 break;
6175 case WMI_10_2_DEBUG_MESG_EVENTID:
6176 ath10k_wmi_event_debug_mesg(ar, skb);
6177 ath10k_wmi_queue_set_coverage_class_work(ar);
6178 break;
6179 case WMI_10_2_UPDATE_STATS_EVENTID:
6180 ath10k_wmi_event_update_stats(ar, skb);
6181 break;
6182 case WMI_10_2_VDEV_START_RESP_EVENTID:
6183 ath10k_wmi_event_vdev_start_resp(ar, skb);
6184 ath10k_wmi_queue_set_coverage_class_work(ar);
6185 break;
6186 case WMI_10_2_VDEV_STOPPED_EVENTID:
6187 ath10k_wmi_event_vdev_stopped(ar, skb);
6188 ath10k_wmi_queue_set_coverage_class_work(ar);
6189 break;
6190 case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6191 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6192 break;
6193 case WMI_10_2_HOST_SWBA_EVENTID:
6194 ath10k_wmi_event_host_swba(ar, skb);
6195 break;
6196 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6197 ath10k_wmi_event_tbttoffset_update(ar, skb);
6198 break;
6199 case WMI_10_2_PHYERR_EVENTID:
6200 ath10k_wmi_event_phyerr(ar, skb);
6201 break;
6202 case WMI_10_2_ROAM_EVENTID:
6203 ath10k_wmi_event_roam(ar, skb);
6204 ath10k_wmi_queue_set_coverage_class_work(ar);
6205 break;
6206 case WMI_10_2_PROFILE_MATCH:
6207 ath10k_wmi_event_profile_match(ar, skb);
6208 break;
6209 case WMI_10_2_DEBUG_PRINT_EVENTID:
6210 ath10k_wmi_event_debug_print(ar, skb);
6211 ath10k_wmi_queue_set_coverage_class_work(ar);
6212 break;
6213 case WMI_10_2_PDEV_QVIT_EVENTID:
6214 ath10k_wmi_event_pdev_qvit(ar, skb);
6215 break;
6216 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6217 ath10k_wmi_event_wlan_profile_data(ar, skb);
6218 break;
6219 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6220 ath10k_wmi_event_rtt_measurement_report(ar, skb);
6221 break;
6222 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6223 ath10k_wmi_event_tsf_measurement_report(ar, skb);
6224 break;
6225 case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6226 ath10k_wmi_event_rtt_error_report(ar, skb);
6227 break;
6228 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6229 ath10k_wmi_event_wow_wakeup_host(ar, skb);
6230 break;
6231 case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6232 ath10k_wmi_event_dcs_interference(ar, skb);
6233 break;
6234 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6235 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6236 break;
6237 case WMI_10_2_INST_RSSI_STATS_EVENTID:
6238 ath10k_wmi_event_inst_rssi_stats(ar, skb);
6239 break;
6240 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6241 ath10k_wmi_event_vdev_standby_req(ar, skb);
6242 ath10k_wmi_queue_set_coverage_class_work(ar);
6243 break;
6244 case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6245 ath10k_wmi_event_vdev_resume_req(ar, skb);
6246 ath10k_wmi_queue_set_coverage_class_work(ar);
6247 break;
6248 case WMI_10_2_SERVICE_READY_EVENTID:
6249 ath10k_wmi_event_service_ready(ar, skb);
6250 return;
6251 case WMI_10_2_READY_EVENTID:
6252 ath10k_wmi_event_ready(ar, skb);
6253 ath10k_wmi_queue_set_coverage_class_work(ar);
6254 break;
6255 case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6256 ath10k_wmi_event_temperature(ar, skb);
6257 break;
6258 case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6259 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6260 break;
6261 case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6262 case WMI_10_2_GPIO_INPUT_EVENTID:
6263 case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6264 case WMI_10_2_GENERIC_BUFFER_EVENTID:
6265 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6266 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6267 case WMI_10_2_WDS_PEER_EVENTID:
6268 ath10k_dbg(ar, ATH10K_DBG_WMI,
6269 "received event id %d not implemented\n", id);
6270 break;
6271 case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6272 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6273 break;
6274 default:
6275 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6276 break;
6277 }
6278
6279 out:
6280 dev_kfree_skb(skb);
6281 }
6282
6283 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6284 {
6285 struct wmi_cmd_hdr *cmd_hdr;
6286 enum wmi_10_4_event_id id;
6287 bool consumed;
6288
6289 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6290 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6291
6292 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6293 goto out;
6294
6295 trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6296
6297 consumed = ath10k_tm_event_wmi(ar, id, skb);
6298
6299 /* The ready event must be handled normally even in UTF mode so
6300 * that we know the UTF firmware has booted; all other WMI events
6301 * are simply passed through to testmode.
6302 */
6303 if (consumed && id != WMI_10_4_READY_EVENTID) {
6304 ath10k_dbg(ar, ATH10K_DBG_WMI,
6305 "wmi testmode consumed 0x%x\n", id);
6306 goto out;
6307 }
6308
6309 switch (id) {
6310 case WMI_10_4_MGMT_RX_EVENTID:
6311 ath10k_wmi_event_mgmt_rx(ar, skb);
6312 /* mgmt_rx() owns the skb now! */
6313 return;
6314 case WMI_10_4_ECHO_EVENTID:
6315 ath10k_wmi_event_echo(ar, skb);
6316 break;
6317 case WMI_10_4_DEBUG_MESG_EVENTID:
6318 ath10k_wmi_event_debug_mesg(ar, skb);
6319 ath10k_wmi_queue_set_coverage_class_work(ar);
6320 break;
6321 case WMI_10_4_SERVICE_READY_EVENTID:
6322 ath10k_wmi_event_service_ready(ar, skb);
6323 return;
6324 case WMI_10_4_SCAN_EVENTID:
6325 ath10k_wmi_event_scan(ar, skb);
6326 ath10k_wmi_queue_set_coverage_class_work(ar);
6327 break;
6328 case WMI_10_4_CHAN_INFO_EVENTID:
6329 ath10k_wmi_event_chan_info(ar, skb);
6330 break;
6331 case WMI_10_4_PHYERR_EVENTID:
6332 ath10k_wmi_event_phyerr(ar, skb);
6333 break;
6334 case WMI_10_4_READY_EVENTID:
6335 ath10k_wmi_event_ready(ar, skb);
6336 ath10k_wmi_queue_set_coverage_class_work(ar);
6337 break;
6338 case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6339 ath10k_wmi_event_peer_sta_kickout(ar, skb);
6340 break;
6341 case WMI_10_4_ROAM_EVENTID:
6342 ath10k_wmi_event_roam(ar, skb);
6343 ath10k_wmi_queue_set_coverage_class_work(ar);
6344 break;
6345 case WMI_10_4_HOST_SWBA_EVENTID:
6346 ath10k_wmi_event_host_swba(ar, skb);
6347 break;
6348 case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6349 ath10k_wmi_event_tbttoffset_update(ar, skb);
6350 break;
6351 case WMI_10_4_DEBUG_PRINT_EVENTID:
6352 ath10k_wmi_event_debug_print(ar, skb);
6353 ath10k_wmi_queue_set_coverage_class_work(ar);
6354 break;
6355 case WMI_10_4_VDEV_START_RESP_EVENTID:
6356 ath10k_wmi_event_vdev_start_resp(ar, skb);
6357 ath10k_wmi_queue_set_coverage_class_work(ar);
6358 break;
6359 case WMI_10_4_VDEV_STOPPED_EVENTID:
6360 ath10k_wmi_event_vdev_stopped(ar, skb);
6361 ath10k_wmi_queue_set_coverage_class_work(ar);
6362 break;
6363 case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6364 case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6365 case WMI_10_4_WDS_PEER_EVENTID:
6366 case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6367 ath10k_dbg(ar, ATH10K_DBG_WMI,
6368 "received event id %d not implemented\n", id);
6369 break;
6370 case WMI_10_4_UPDATE_STATS_EVENTID:
6371 ath10k_wmi_event_update_stats(ar, skb);
6372 break;
6373 case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6374 ath10k_wmi_event_temperature(ar, skb);
6375 break;
6376 case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6377 ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6378 break;
6379 case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6380 ath10k_wmi_event_pdev_tpc_config(ar, skb);
6381 break;
6382 case WMI_10_4_TDLS_PEER_EVENTID:
6383 ath10k_wmi_handle_tdls_peer_event(ar, skb);
6384 break;
6385 case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6386 ath10k_wmi_event_tpc_final_table(ar, skb);
6387 break;
6388 case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6389 ath10k_wmi_event_dfs_status_check(ar, skb);
6390 break;
6391 case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6392 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6393 break;
6394 default:
6395 ath10k_warn(ar, "Unknown eventid: %d\n", id);
6396 break;
6397 }
6398
6399 out:
6400 dev_kfree_skb(skb);
6401 }
6402
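/* HTC rx completion handler for the WMI control endpoint; hands the
 * buffer to the firmware-specific rx op via ath10k_wmi_rx().
 */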
6403 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6404 {
6405 int ret;
6406
6407 ret = ath10k_wmi_rx(ar, skb);
6408 if (ret)
6409 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6410 }
6411
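/* Connect the WMI control service over HTC and remember the endpoint
 * id used for subsequent WMI command transmission.
 */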
6412 int ath10k_wmi_connect(struct ath10k *ar)
6413 {
6414 int status;
6415 struct ath10k_htc_svc_conn_req conn_req;
6416 struct ath10k_htc_svc_conn_resp conn_resp;
6417
6418 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6419
6420 memset(&conn_req, 0, sizeof(conn_req));
6421 memset(&conn_resp, 0, sizeof(conn_resp));
6422
6423 /* these fields are the same for all service endpoints */
6424 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6425 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6426 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6427
6428 /* connect to control service */
6429 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6430
6431 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6432 if (status) {
6433 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6434 status);
6435 return status;
6436 }
6437
6438 ar->wmi.eid = conn_resp.eid;
6439 return 0;
6440 }
6441
6442 static struct sk_buff *
6443 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6444 const u8 macaddr[ETH_ALEN])
6445 {
6446 struct wmi_pdev_set_base_macaddr_cmd *cmd;
6447 struct sk_buff *skb;
6448
6449 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6450 if (!skb)
6451 return ERR_PTR(-ENOMEM);
6452
6453 cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6454 ether_addr_copy(cmd->mac_addr.addr, macaddr);
6455
6456 ath10k_dbg(ar, ATH10K_DBG_WMI,
6457 "wmi pdev basemac %pM\n", macaddr);
6458 return skb;
6459 }
6460
6461 static struct sk_buff *
6462 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6463 u16 ctl2g, u16 ctl5g,
6464 enum wmi_dfs_region dfs_reg)
6465 {
6466 struct wmi_pdev_set_regdomain_cmd *cmd;
6467 struct sk_buff *skb;
6468
6469 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6470 if (!skb)
6471 return ERR_PTR(-ENOMEM);
6472
6473 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6474 cmd->reg_domain = __cpu_to_le32(rd);
6475 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6476 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6477 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6478 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6479
6480 ath10k_dbg(ar, ATH10K_DBG_WMI,
6481 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6482 rd, rd2g, rd5g, ctl2g, ctl5g);
6483 return skb;
6484 }
6485
6486 static struct sk_buff *
6487 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6488 rd5g, u16 ctl2g, u16 ctl5g,
6489 enum wmi_dfs_region dfs_reg)
6490 {
6491 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6492 struct sk_buff *skb;
6493
6494 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6495 if (!skb)
6496 return ERR_PTR(-ENOMEM);
6497
6498 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6499 cmd->reg_domain = __cpu_to_le32(rd);
6500 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6501 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6502 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6503 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6504 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6505
6506 ath10k_dbg(ar, ATH10K_DBG_WMI,
6507 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6508 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6509 return skb;
6510 }
6511
6512 static struct sk_buff *
6513 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6514 {
6515 struct wmi_pdev_suspend_cmd *cmd;
6516 struct sk_buff *skb;
6517
6518 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6519 if (!skb)
6520 return ERR_PTR(-ENOMEM);
6521
6522 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6523 cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6524
6525 return skb;
6526 }
6527
6528 static struct sk_buff *
6529 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6530 {
6531 struct sk_buff *skb;
6532
6533 skb = ath10k_wmi_alloc_skb(ar, 0);
6534 if (!skb)
6535 return ERR_PTR(-ENOMEM);
6536
6537 return skb;
6538 }
6539
6540 static struct sk_buff *
6541 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6542 {
6543 struct wmi_pdev_set_param_cmd *cmd;
6544 struct sk_buff *skb;
6545
6546 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6547 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6548 id);
6549 return ERR_PTR(-EOPNOTSUPP);
6550 }
6551
6552 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6553 if (!skb)
6554 return ERR_PTR(-ENOMEM);
6555
6556 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6557 cmd->param_id = __cpu_to_le32(id);
6558 cmd->param_value = __cpu_to_le32(value);
6559
6560 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6561 id, value);
6562 return skb;
6563 }
6564
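/* Describe the host memory chunks (DMA address, length, request id)
 * previously allocated for the firmware so they can be advertised in
 * the WMI init command.
 */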
6565 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6566 struct wmi_host_mem_chunks *chunks)
6567 {
6568 struct host_memory_chunk *chunk;
6569 int i;
6570
6571 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6572
6573 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6574 chunk = &chunks->items[i];
6575 chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6576 chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6577 chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6578
6579 ath10k_dbg(ar, ATH10K_DBG_WMI,
6580 "wmi chunk %d len %d requested, addr 0x%llx\n",
6581 i,
6582 ar->wmi.mem_chunks[i].len,
6583 (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6584 }
6585 }
6586
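/* Build the WMI init command for main firmware: a resource config
 * populated from the TARGET_* defaults plus the host memory chunks.
 */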
6587 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6588 {
6589 struct wmi_init_cmd *cmd;
6590 struct sk_buff *buf;
6591 struct wmi_resource_config config = {};
6592 u32 val;
6593
6594 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6595 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6596 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6597
6598 config.num_offload_reorder_bufs =
6599 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6600
6601 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6602 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6603 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6604 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6605 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6606 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6607 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6608 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6609 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6610 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6611 config.scan_max_pending_reqs =
6612 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6613
6614 config.bmiss_offload_max_vdev =
6615 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6616
6617 config.roam_offload_max_vdev =
6618 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6619
6620 config.roam_offload_max_ap_profiles =
6621 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6622
6623 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6624 config.num_mcast_table_elems =
6625 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6626
6627 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6628 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6629 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6630 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6631 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6632
6633 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6634 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6635
6636 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6637
6638 config.gtk_offload_max_vdev =
6639 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6640
6641 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6642 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6643
6644 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6645 ar->wmi.num_mem_chunks));
6646 if (!buf)
6647 return ERR_PTR(-ENOMEM);
6648
6649 cmd = (struct wmi_init_cmd *)buf->data;
6650
6651 memcpy(&cmd->resource_config, &config, sizeof(config));
6652 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6653
6654 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6655 return buf;
6656 }
6657
6658 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6659 {
6660 struct wmi_init_cmd_10x *cmd;
6661 struct sk_buff *buf;
6662 struct wmi_resource_config_10x config = {};
6663 u32 val;
6664
6665 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6666 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6667 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6668 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6669 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6670 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6671 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6672 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6673 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6674 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6675 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6676 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6677 config.scan_max_pending_reqs =
6678 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6679
6680 config.bmiss_offload_max_vdev =
6681 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6682
6683 config.roam_offload_max_vdev =
6684 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6685
6686 config.roam_offload_max_ap_profiles =
6687 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6688
6689 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6690 config.num_mcast_table_elems =
6691 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6692
6693 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6694 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6695 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6696 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6697 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6698
6699 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6700 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6701
6702 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6703
6704 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6705 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6706
6707 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6708 ar->wmi.num_mem_chunks));
6709 if (!buf)
6710 return ERR_PTR(-ENOMEM);
6711
6712 cmd = (struct wmi_init_cmd_10x *)buf->data;
6713
6714 memcpy(&cmd->resource_config, &config, sizeof(config));
6715 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6716
6717 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6718 return buf;
6719 }
6720
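/* The 10.2 init command additionally carries a feature mask: rx
 * batching is always enabled, while coex GPIO, peer stats and BSS
 * channel info are enabled only when the corresponding service bits
 * or device flags are set.
 */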
6721 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6722 {
6723 struct wmi_init_cmd_10_2 *cmd;
6724 struct sk_buff *buf;
6725 struct wmi_resource_config_10x config = {};
6726 u32 val, features;
6727
6728 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6729 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6730
6731 if (ath10k_peer_stats_enabled(ar)) {
6732 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6733 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6734 } else {
6735 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6736 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6737 }
6738
6739 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6740 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6741 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6742 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6743 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6744 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6745 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6746 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6747
6748 config.scan_max_pending_reqs =
6749 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6750
6751 config.bmiss_offload_max_vdev =
6752 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6753
6754 config.roam_offload_max_vdev =
6755 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6756
6757 config.roam_offload_max_ap_profiles =
6758 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6759
6760 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6761 config.num_mcast_table_elems =
6762 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6763
6764 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6765 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6766 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6767 config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6768 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6769
6770 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6771 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6772
6773 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6774
6775 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6776 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6777
6778 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6779 ar->wmi.num_mem_chunks));
6780 if (!buf)
6781 return ERR_PTR(-ENOMEM);
6782
6783 cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6784
6785 features = WMI_10_2_RX_BATCH_MODE;
6786
6787 if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6788 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6789 features |= WMI_10_2_COEX_GPIO;
6790
6791 if (ath10k_peer_stats_enabled(ar))
6792 features |= WMI_10_2_PEER_STATS;
6793
6794 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6795 features |= WMI_10_2_BSS_CHAN_INFO;
6796
6797 cmd->resource_config.feature_mask = __cpu_to_le32(features);
6798
6799 memcpy(&cmd->resource_config.common, &config, sizeof(config));
6800 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6801
6802 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6803 return buf;
6804 }
6805
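/* 10.4 sizes several resources from per-device values
 * (max_num_vdevs, max_num_peers, HTT pending tx count) instead of
 * compile-time TARGET_* constants.
 */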
6806 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6807 {
6808 struct wmi_init_cmd_10_4 *cmd;
6809 struct sk_buff *buf;
6810 struct wmi_resource_config_10_4 config = {};
6811
6812 config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6813 config.num_peers = __cpu_to_le32(ar->max_num_peers);
6814 config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6815 config.num_tids = __cpu_to_le32(ar->num_tids);
6816
6817 config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6818 config.num_offload_reorder_buffs =
6819 __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6820 config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6821 config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6822 config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6823 config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6824
6825 config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6826 config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6827 config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6828 config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6829
6830 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6831 config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6832 config.bmiss_offload_max_vdev =
6833 __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6834 config.roam_offload_max_vdev =
6835 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6836 config.roam_offload_max_ap_profiles =
6837 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6838 config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6839 config.num_mcast_table_elems =
6840 __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6841
6842 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6843 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6844 config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6845 config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6846 config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6847
6848 config.rx_skip_defrag_timeout_dup_detection_check =
6849 __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6850
6851 config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6852 config.gtk_offload_max_vdev =
6853 __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6854 config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6855 config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6856 config.max_peer_ext_stats =
6857 __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6858 config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6859
6860 config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6861 config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6862 config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6863 config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6864
6865 config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6866 config.tt_support =
6867 __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6868 config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6869 config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6870 config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6871
6872 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6873 ar->wmi.num_mem_chunks));
6874 if (!buf)
6875 return ERR_PTR(-ENOMEM);
6876
6877 cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6878 memcpy(&cmd->resource_config, &config, sizeof(config));
6879 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6880
6881 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6882 return buf;
6883 }
6884
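/* Validate scan arguments against the WMI limits before a start scan
 * command is built.
 */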
6885 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6886 {
6887 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6888 return -EINVAL;
6889 if (arg->n_channels > ARRAY_SIZE(arg->channels))
6890 return -EINVAL;
6891 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6892 return -EINVAL;
6893 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6894 return -EINVAL;
6895
6896 return 0;
6897 }
6898
6899 static size_t
6900 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6901 {
6902 int len = 0;
6903
6904 if (arg->ie_len) {
6905 len += sizeof(struct wmi_ie_data);
6906 len += roundup(arg->ie_len, 4);
6907 }
6908
6909 if (arg->n_channels) {
6910 len += sizeof(struct wmi_chan_list);
6911 len += sizeof(__le32) * arg->n_channels;
6912 }
6913
6914 if (arg->n_ssids) {
6915 len += sizeof(struct wmi_ssid_list);
6916 len += sizeof(struct wmi_ssid) * arg->n_ssids;
6917 }
6918
6919 if (arg->n_bssids) {
6920 len += sizeof(struct wmi_bssid_list);
6921 len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6922 }
6923
6924 return len;
6925 }
6926
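/* Fill the common start scan fields; scan and requestor ids are
 * tagged with the WMI_HOST_SCAN_* prefixes before being sent.
 */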
6927 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6928 const struct wmi_start_scan_arg *arg)
6929 {
6930 u32 scan_id;
6931 u32 scan_req_id;
6932
6933 scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
6934 scan_id |= arg->scan_id;
6935
6936 scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6937 scan_req_id |= arg->scan_req_id;
6938
6939 cmn->scan_id = __cpu_to_le32(scan_id);
6940 cmn->scan_req_id = __cpu_to_le32(scan_req_id);
6941 cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
6942 cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
6943 cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6944 cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
6945 cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6946 cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
6947 cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
6948 cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
6949 cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6950 cmn->idle_time = __cpu_to_le32(arg->idle_time);
6951 cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
6952 cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
6953 cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
6954 }
6955
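/* Append the optional scan TLVs (channel list, SSID list, BSSID list
 * and IEs) into the buffer sized by ath10k_wmi_start_scan_tlvs_len().
 */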
6956 static void
6957 ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
6958 const struct wmi_start_scan_arg *arg)
6959 {
6960 struct wmi_ie_data *ie;
6961 struct wmi_chan_list *channels;
6962 struct wmi_ssid_list *ssids;
6963 struct wmi_bssid_list *bssids;
6964 void *ptr = tlvs;
6965 int i;
6966
6967 if (arg->n_channels) {
6968 channels = ptr;
6969 channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6970 channels->num_chan = __cpu_to_le32(arg->n_channels);
6971
6972 for (i = 0; i < arg->n_channels; i++)
6973 channels->channel_list[i].freq =
6974 __cpu_to_le16(arg->channels[i]);
6975
6976 ptr += sizeof(*channels);
6977 ptr += sizeof(__le32) * arg->n_channels;
6978 }
6979
6980 if (arg->n_ssids) {
6981 ssids = ptr;
6982 ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6983 ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6984
6985 for (i = 0; i < arg->n_ssids; i++) {
6986 ssids->ssids[i].ssid_len =
6987 __cpu_to_le32(arg->ssids[i].len);
6988 memcpy(&ssids->ssids[i].ssid,
6989 arg->ssids[i].ssid,
6990 arg->ssids[i].len);
6991 }
6992
6993 ptr += sizeof(*ssids);
6994 ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6995 }
6996
6997 if (arg->n_bssids) {
6998 bssids = ptr;
6999 bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
7000 bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
7001
7002 for (i = 0; i < arg->n_bssids; i++)
7003 ether_addr_copy(bssids->bssid_list[i].addr,
7004 arg->bssids[i].bssid);
7005
7006 ptr += sizeof(*bssids);
7007 ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
7008 }
7009
7010 if (arg->ie_len) {
7011 ie = ptr;
7012 ie->tag = __cpu_to_le32(WMI_IE_TAG);
7013 ie->ie_len = __cpu_to_le32(arg->ie_len);
7014 memcpy(ie->ie_data, arg->ie, arg->ie_len);
7015
7016 ptr += sizeof(*ie);
7017 ptr += roundup(arg->ie_len, 4);
7018 }
7019 }
7020
7021 static struct sk_buff *
7022 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
7023 const struct wmi_start_scan_arg *arg)
7024 {
7025 struct wmi_start_scan_cmd *cmd;
7026 struct sk_buff *skb;
7027 size_t len;
7028 int ret;
7029
7030 ret = ath10k_wmi_start_scan_verify(arg);
7031 if (ret)
7032 return ERR_PTR(ret);
7033
7034 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7035 skb = ath10k_wmi_alloc_skb(ar, len);
7036 if (!skb)
7037 return ERR_PTR(-ENOMEM);
7038
7039 cmd = (struct wmi_start_scan_cmd *)skb->data;
7040
7041 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7042 ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7043
7044 cmd->burst_duration_ms = __cpu_to_le32(0);
7045
7046 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
7047 return skb;
7048 }
7049
7050 static struct sk_buff *
7051 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
7052 const struct wmi_start_scan_arg *arg)
7053 {
7054 struct wmi_10x_start_scan_cmd *cmd;
7055 struct sk_buff *skb;
7056 size_t len;
7057 int ret;
7058
7059 ret = ath10k_wmi_start_scan_verify(arg);
7060 if (ret)
7061 return ERR_PTR(ret);
7062
7063 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7064 skb = ath10k_wmi_alloc_skb(ar, len);
7065 if (!skb)
7066 return ERR_PTR(-ENOMEM);
7067
7068 cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
7069
7070 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7071 ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7072
7073 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
7074 return skb;
7075 }
7076
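/* Populate commonly used scan defaults: dwell and rest times, an
 * upper bound on the total scan time and the broadcast (wildcard)
 * BSSID.
 */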
7077 void ath10k_wmi_start_scan_init(struct ath10k *ar,
7078 struct wmi_start_scan_arg *arg)
7079 {
7080 /* setup commonly used values */
7081 arg->scan_req_id = 1;
7082 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
7083 arg->dwell_time_active = 50;
7084 arg->dwell_time_passive = 150;
7085 arg->min_rest_time = 50;
7086 arg->max_rest_time = 500;
7087 arg->repeat_probe_time = 0;
7088 arg->probe_spacing_time = 0;
7089 arg->idle_time = 0;
7090 arg->max_scan_time = 20000;
7091 arg->probe_delay = 5;
7092 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7093 | WMI_SCAN_EVENT_COMPLETED
7094 | WMI_SCAN_EVENT_BSS_CHANNEL
7095 | WMI_SCAN_EVENT_FOREIGN_CHANNEL
7096 | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7097 | WMI_SCAN_EVENT_DEQUEUED;
7098 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7099 arg->n_bssids = 1;
7100 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7101 }
7102
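/* Build a stop scan command; scan and requestor ids are limited to
 * 12 bits so they do not collide with the host prefixes OR'ed in
 * below.
 */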
7103 static struct sk_buff *
7104 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7105 const struct wmi_stop_scan_arg *arg)
7106 {
7107 struct wmi_stop_scan_cmd *cmd;
7108 struct sk_buff *skb;
7109 u32 scan_id;
7110 u32 req_id;
7111
7112 if (arg->req_id > 0xFFF)
7113 return ERR_PTR(-EINVAL);
7114 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7115 return ERR_PTR(-EINVAL);
7116
7117 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7118 if (!skb)
7119 return ERR_PTR(-ENOMEM);
7120
7121 scan_id = arg->u.scan_id;
7122 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7123
7124 req_id = arg->req_id;
7125 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7126
7127 cmd = (struct wmi_stop_scan_cmd *)skb->data;
7128 cmd->req_type = __cpu_to_le32(arg->req_type);
7129 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
7130 cmd->scan_id = __cpu_to_le32(scan_id);
7131 cmd->scan_req_id = __cpu_to_le32(req_id);
7132
7133 ath10k_dbg(ar, ATH10K_DBG_WMI,
7134 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7135 arg->req_id, arg->req_type, arg->u.scan_id);
7136 return skb;
7137 }
7138
7139 static struct sk_buff *
7140 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7141 enum wmi_vdev_type type,
7142 enum wmi_vdev_subtype subtype,
7143 const u8 macaddr[ETH_ALEN])
7144 {
7145 struct wmi_vdev_create_cmd *cmd;
7146 struct sk_buff *skb;
7147
7148 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7149 if (!skb)
7150 return ERR_PTR(-ENOMEM);
7151
7152 cmd = (struct wmi_vdev_create_cmd *)skb->data;
7153 cmd->vdev_id = __cpu_to_le32(vdev_id);
7154 cmd->vdev_type = __cpu_to_le32(type);
7155 cmd->vdev_subtype = __cpu_to_le32(subtype);
7156 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7157
7158 ath10k_dbg(ar, ATH10K_DBG_WMI,
7159 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7160 vdev_id, type, subtype, macaddr);
7161 return skb;
7162 }
7163
7164 static struct sk_buff *
7165 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7166 {
7167 struct wmi_vdev_delete_cmd *cmd;
7168 struct sk_buff *skb;
7169
7170 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7171 if (!skb)
7172 return ERR_PTR(-ENOMEM);
7173
7174 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7175 cmd->vdev_id = __cpu_to_le32(vdev_id);
7176
7177 ath10k_dbg(ar, ATH10K_DBG_WMI,
7178 "WMI vdev delete id %d\n", vdev_id);
7179 return skb;
7180 }
7181
7182 static struct sk_buff *
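/* Common generator for vdev start and restart requests; the same
 * command structure is used for both, only the debug string differs
 * here. Hidden SSID and PMF are conveyed through the flags field.
 */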
7183 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7184 const struct wmi_vdev_start_request_arg *arg,
7185 bool restart)
7186 {
7187 struct wmi_vdev_start_request_cmd *cmd;
7188 struct sk_buff *skb;
7189 const char *cmdname;
7190 u32 flags = 0;
7191
7192 if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7193 return ERR_PTR(-EINVAL);
7194 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7195 return ERR_PTR(-EINVAL);
7196
7197 if (restart)
7198 cmdname = "restart";
7199 else
7200 cmdname = "start";
7201
7202 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7203 if (!skb)
7204 return ERR_PTR(-ENOMEM);
7205
7206 if (arg->hidden_ssid)
7207 flags |= WMI_VDEV_START_HIDDEN_SSID;
7208 if (arg->pmf_enabled)
7209 flags |= WMI_VDEV_START_PMF_ENABLED;
7210
7211 cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7212 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7213 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
7214 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7215 cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
7216 cmd->flags = __cpu_to_le32(flags);
7217 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
7218 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
7219
7220 if (arg->ssid) {
7221 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7222 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7223 }
7224
7225 ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
7226
7227 ath10k_dbg(ar, ATH10K_DBG_WMI,
7228 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7229 cmdname, arg->vdev_id,
7230 flags, arg->channel.freq, arg->channel.mode,
7231 cmd->chan.flags, arg->channel.max_power);
7232
7233 return skb;
7234 }
7235
7236 static struct sk_buff *
7237 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7238 {
7239 struct wmi_vdev_stop_cmd *cmd;
7240 struct sk_buff *skb;
7241
7242 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7243 if (!skb)
7244 return ERR_PTR(-ENOMEM);
7245
7246 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7247 cmd->vdev_id = __cpu_to_le32(vdev_id);
7248
7249 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7250 return skb;
7251 }
7252
7253 static struct sk_buff *
7254 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7255 const u8 *bssid)
7256 {
7257 struct wmi_vdev_up_cmd *cmd;
7258 struct sk_buff *skb;
7259
7260 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7261 if (!skb)
7262 return ERR_PTR(-ENOMEM);
7263
7264 cmd = (struct wmi_vdev_up_cmd *)skb->data;
7265 cmd->vdev_id = __cpu_to_le32(vdev_id);
7266 cmd->vdev_assoc_id = __cpu_to_le32(aid);
7267 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7268
7269 ath10k_dbg(ar, ATH10K_DBG_WMI,
7270 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7271 vdev_id, aid, bssid);
7272 return skb;
7273 }
7274
7275 static struct sk_buff *
7276 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7277 {
7278 struct wmi_vdev_down_cmd *cmd;
7279 struct sk_buff *skb;
7280
7281 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7282 if (!skb)
7283 return ERR_PTR(-ENOMEM);
7284
7285 cmd = (struct wmi_vdev_down_cmd *)skb->data;
7286 cmd->vdev_id = __cpu_to_le32(vdev_id);
7287
7288 ath10k_dbg(ar, ATH10K_DBG_WMI,
7289 "wmi mgmt vdev down id 0x%x\n", vdev_id);
7290 return skb;
7291 }
7292
7293 static struct sk_buff *
7294 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7295 u32 param_id, u32 param_value)
7296 {
7297 struct wmi_vdev_set_param_cmd *cmd;
7298 struct sk_buff *skb;
7299
7300 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7301 ath10k_dbg(ar, ATH10K_DBG_WMI,
7302 "vdev param %d not supported by firmware\n",
7303 param_id);
7304 return ERR_PTR(-EOPNOTSUPP);
7305 }
7306
7307 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7308 if (!skb)
7309 return ERR_PTR(-ENOMEM);
7310
7311 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7312 cmd->vdev_id = __cpu_to_le32(vdev_id);
7313 cmd->param_id = __cpu_to_le32(param_id);
7314 cmd->param_value = __cpu_to_le32(param_value);
7315
7316 ath10k_dbg(ar, ATH10K_DBG_WMI,
7317 "wmi vdev id 0x%x set param %d value %d\n",
7318 vdev_id, param_id, param_value);
7319 return skb;
7320 }
7321
7322 static struct sk_buff *
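/* Build the vdev install key command; key data must be supplied
 * exactly when a real cipher is selected and is appended after the
 * fixed-size command.
 */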
7323 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7324 const struct wmi_vdev_install_key_arg *arg)
7325 {
7326 struct wmi_vdev_install_key_cmd *cmd;
7327 struct sk_buff *skb;
7328
7329 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7330 return ERR_PTR(-EINVAL);
7331 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7332 return ERR_PTR(-EINVAL);
7333
7334 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7335 if (!skb)
7336 return ERR_PTR(-ENOMEM);
7337
7338 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7339 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7340 cmd->key_idx = __cpu_to_le32(arg->key_idx);
7341 cmd->key_flags = __cpu_to_le32(arg->key_flags);
7342 cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
7343 cmd->key_len = __cpu_to_le32(arg->key_len);
7344 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7345 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7346
7347 if (arg->macaddr)
7348 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7349 if (arg->key_data)
7350 memcpy(cmd->key_data, arg->key_data, arg->key_len);
7351
7352 ath10k_dbg(ar, ATH10K_DBG_WMI,
7353 "wmi vdev install key idx %d cipher %d len %d\n",
7354 arg->key_idx, arg->key_cipher, arg->key_len);
7355 return skb;
7356 }
7357
7358 static struct sk_buff *
7359 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7360 const struct wmi_vdev_spectral_conf_arg *arg)
7361 {
7362 struct wmi_vdev_spectral_conf_cmd *cmd;
7363 struct sk_buff *skb;
7364
7365 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7366 if (!skb)
7367 return ERR_PTR(-ENOMEM);
7368
7369 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7370 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7371 cmd->scan_count = __cpu_to_le32(arg->scan_count);
7372 cmd->scan_period = __cpu_to_le32(arg->scan_period);
7373 cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7374 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7375 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7376 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7377 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7378 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7379 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7380 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7381 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7382 cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7383 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7384 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7385 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7386 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7387 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7388 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7389
7390 return skb;
7391 }
7392
7393 static struct sk_buff *
7394 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7395 u32 trigger, u32 enable)
7396 {
7397 struct wmi_vdev_spectral_enable_cmd *cmd;
7398 struct sk_buff *skb;
7399
7400 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7401 if (!skb)
7402 return ERR_PTR(-ENOMEM);
7403
7404 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7405 cmd->vdev_id = __cpu_to_le32(vdev_id);
7406 cmd->trigger_cmd = __cpu_to_le32(trigger);
7407 cmd->enable_cmd = __cpu_to_le32(enable);
7408
7409 return skb;
7410 }
7411
7412 static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN],enum wmi_peer_type peer_type)7413 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7414 const u8 peer_addr[ETH_ALEN],
7415 enum wmi_peer_type peer_type)
7416 {
7417 struct wmi_peer_create_cmd *cmd;
7418 struct sk_buff *skb;
7419
7420 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7421 if (!skb)
7422 return ERR_PTR(-ENOMEM);
7423
7424 cmd = (struct wmi_peer_create_cmd *)skb->data;
7425 cmd->vdev_id = __cpu_to_le32(vdev_id);
7426 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7427 cmd->peer_type = __cpu_to_le32(peer_type);
7428
7429 ath10k_dbg(ar, ATH10K_DBG_WMI,
7430 "wmi peer create vdev_id %d peer_addr %pM\n",
7431 vdev_id, peer_addr);
7432 return skb;
7433 }
7434
7435 static struct sk_buff *
ath10k_wmi_op_gen_peer_delete(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN])7436 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7437 const u8 peer_addr[ETH_ALEN])
7438 {
7439 struct wmi_peer_delete_cmd *cmd;
7440 struct sk_buff *skb;
7441
7442 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7443 if (!skb)
7444 return ERR_PTR(-ENOMEM);
7445
7446 cmd = (struct wmi_peer_delete_cmd *)skb->data;
7447 cmd->vdev_id = __cpu_to_le32(vdev_id);
7448 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7449
7450 ath10k_dbg(ar, ATH10K_DBG_WMI,
7451 "wmi peer delete vdev_id %d peer_addr %pM\n",
7452 vdev_id, peer_addr);
7453 return skb;
7454 }
7455
7456 static struct sk_buff *
ath10k_wmi_op_gen_peer_flush(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN],u32 tid_bitmap)7457 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7458 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7459 {
7460 struct wmi_peer_flush_tids_cmd *cmd;
7461 struct sk_buff *skb;
7462
7463 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7464 if (!skb)
7465 return ERR_PTR(-ENOMEM);
7466
7467 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7468 cmd->vdev_id = __cpu_to_le32(vdev_id);
7469 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7470 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7471
7472 ath10k_dbg(ar, ATH10K_DBG_WMI,
7473 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7474 vdev_id, peer_addr, tid_bitmap);
7475 return skb;
7476 }
7477
7478 static struct sk_buff *
ath10k_wmi_op_gen_peer_set_param(struct ath10k * ar,u32 vdev_id,const u8 * peer_addr,enum wmi_peer_param param_id,u32 param_value)7479 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7480 const u8 *peer_addr,
7481 enum wmi_peer_param param_id,
7482 u32 param_value)
7483 {
7484 struct wmi_peer_set_param_cmd *cmd;
7485 struct sk_buff *skb;
7486
7487 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7488 if (!skb)
7489 return ERR_PTR(-ENOMEM);
7490
7491 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7492 cmd->vdev_id = __cpu_to_le32(vdev_id);
7493 cmd->param_id = __cpu_to_le32(param_id);
7494 cmd->param_value = __cpu_to_le32(param_value);
7495 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7496
7497 ath10k_dbg(ar, ATH10K_DBG_WMI,
7498 "wmi vdev %d peer 0x%pM set param %d value %d\n",
7499 vdev_id, peer_addr, param_id, param_value);
7500 return skb;
7501 }
7502
ath10k_wmi_op_gen_gpio_config(struct ath10k * ar,u32 gpio_num,u32 input,u32 pull_type,u32 intr_mode)7503 static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
7504 u32 gpio_num, u32 input,
7505 u32 pull_type, u32 intr_mode)
7506 {
7507 struct wmi_gpio_config_cmd *cmd;
7508 struct sk_buff *skb;
7509
7510 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7511 if (!skb)
7512 return ERR_PTR(-ENOMEM);
7513
7514 cmd = (struct wmi_gpio_config_cmd *)skb->data;
7515 cmd->pull_type = __cpu_to_le32(pull_type);
7516 cmd->gpio_num = __cpu_to_le32(gpio_num);
7517 cmd->input = __cpu_to_le32(input);
7518 cmd->intr_mode = __cpu_to_le32(intr_mode);
7519
7520 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
7521 gpio_num, input, pull_type, intr_mode);
7522
7523 return skb;
7524 }
7525
ath10k_wmi_op_gen_gpio_output(struct ath10k * ar,u32 gpio_num,u32 set)7526 static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
7527 u32 gpio_num, u32 set)
7528 {
7529 struct wmi_gpio_output_cmd *cmd;
7530 struct sk_buff *skb;
7531
7532 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7533 if (!skb)
7534 return ERR_PTR(-ENOMEM);
7535
7536 cmd = (struct wmi_gpio_output_cmd *)skb->data;
7537 cmd->gpio_num = __cpu_to_le32(gpio_num);
7538 cmd->set = __cpu_to_le32(set);
7539
7540 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
7541 gpio_num, set);
7542
7543 return skb;
7544 }
7545
7546 static struct sk_buff *
ath10k_wmi_op_gen_set_psmode(struct ath10k * ar,u32 vdev_id,enum wmi_sta_ps_mode psmode)7547 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7548 enum wmi_sta_ps_mode psmode)
7549 {
7550 struct wmi_sta_powersave_mode_cmd *cmd;
7551 struct sk_buff *skb;
7552
7553 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7554 if (!skb)
7555 return ERR_PTR(-ENOMEM);
7556
7557 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7558 cmd->vdev_id = __cpu_to_le32(vdev_id);
7559 cmd->sta_ps_mode = __cpu_to_le32(psmode);
7560
7561 ath10k_dbg(ar, ATH10K_DBG_WMI,
7562 "wmi set powersave id 0x%x mode %d\n",
7563 vdev_id, psmode);
7564 return skb;
7565 }
7566
7567 static struct sk_buff *
ath10k_wmi_op_gen_set_sta_ps(struct ath10k * ar,u32 vdev_id,enum wmi_sta_powersave_param param_id,u32 value)7568 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7569 enum wmi_sta_powersave_param param_id,
7570 u32 value)
7571 {
7572 struct wmi_sta_powersave_param_cmd *cmd;
7573 struct sk_buff *skb;
7574
7575 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7576 if (!skb)
7577 return ERR_PTR(-ENOMEM);
7578
7579 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7580 cmd->vdev_id = __cpu_to_le32(vdev_id);
7581 cmd->param_id = __cpu_to_le32(param_id);
7582 cmd->param_value = __cpu_to_le32(value);
7583
7584 ath10k_dbg(ar, ATH10K_DBG_STA,
7585 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7586 vdev_id, param_id, value);
7587 return skb;
7588 }
7589
7590 static struct sk_buff *
ath10k_wmi_op_gen_set_ap_ps(struct ath10k * ar,u32 vdev_id,const u8 * mac,enum wmi_ap_ps_peer_param param_id,u32 value)7591 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7592 enum wmi_ap_ps_peer_param param_id, u32 value)
7593 {
7594 struct wmi_ap_ps_peer_cmd *cmd;
7595 struct sk_buff *skb;
7596
7597 if (!mac)
7598 return ERR_PTR(-EINVAL);
7599
7600 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7601 if (!skb)
7602 return ERR_PTR(-ENOMEM);
7603
7604 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7605 cmd->vdev_id = __cpu_to_le32(vdev_id);
7606 cmd->param_id = __cpu_to_le32(param_id);
7607 cmd->param_value = __cpu_to_le32(value);
7608 ether_addr_copy(cmd->peer_macaddr.addr, mac);
7609
7610 ath10k_dbg(ar, ATH10K_DBG_WMI,
7611 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7612 vdev_id, param_id, value, mac);
7613 return skb;
7614 }
7615
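/* The scan channel list command is variable length: the skb is sized with
 * struct_size() for arg->n_channels entries and each wmi_channel is filled
 * via ath10k_wmi_put_wmi_channel().
 */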
static struct sk_buff *
ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
                                 const struct wmi_scan_chan_list_arg *arg)
{
        struct wmi_scan_chan_list_cmd *cmd;
        struct sk_buff *skb;
        struct wmi_channel_arg *ch;
        struct wmi_channel *ci;
        int i;

        skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
        cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

        for (i = 0; i < arg->n_channels; i++) {
                ch = &arg->channels[i];
                ci = &cmd->chan_info[i];

                ath10k_wmi_put_wmi_channel(ar, ci, ch);
        }

        return skb;
}

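/* Fill the fields of wmi_common_peer_assoc_complete_cmd that are shared by
 * all firmware branches: association id, capabilities, and the legacy, HT
 * and VHT rate sets.
 */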
static void
ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
                           const struct wmi_peer_assoc_complete_arg *arg)
{
        struct wmi_common_peer_assoc_complete_cmd *cmd = buf;

        cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
        cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
        cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
        cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
        cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
        cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
        cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
        cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
        cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
        cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
        cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
        cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
        cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

        ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);

        cmd->peer_legacy_rates.num_rates =
                __cpu_to_le32(arg->peer_legacy_rates.num_rates);
        memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
               arg->peer_legacy_rates.num_rates);

        cmd->peer_ht_rates.num_rates =
                __cpu_to_le32(arg->peer_ht_rates.num_rates);
        memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
               arg->peer_ht_rates.num_rates);

        cmd->peer_vht_rates.rx_max_rate =
                __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
        cmd->peer_vht_rates.rx_mcs_set =
                __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
        cmd->peer_vht_rates.tx_max_rate =
                __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
        cmd->peer_vht_rates.tx_mcs_set =
                __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
}

static void
ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
                                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct wmi_main_peer_assoc_complete_cmd *cmd = buf;

        ath10k_wmi_peer_assoc_fill(ar, buf, arg);
        memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
}

static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
                                const struct wmi_peer_assoc_complete_arg *arg)
{
        ath10k_wmi_peer_assoc_fill(ar, buf, arg);
}

static void
ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
                                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
        int max_mcs, max_nss;
        u32 info0;

        /* TODO: Is using max values okay with firmware? */
        max_mcs = 0xf;
        max_nss = 0xf;

        info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
                SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);

        ath10k_wmi_peer_assoc_fill(ar, buf, arg);
        cmd->info0 = __cpu_to_le32(info0);
}

static void
ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
                                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;

        ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
        cmd->peer_bw_rxnss_override =
                __cpu_to_le32(arg->peer_bw_rxnss_override);
}

static int
ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
{
        if (arg->peer_mpdu_density > 16)
                return -EINVAL;
        if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
                return -EINVAL;
        if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
                return -EINVAL;

        return 0;
}

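/* The peer assoc generators below differ only in the command layout they
 * allocate (main, 10.1, 10.2, 10.4) and the fill helper they call; argument
 * validation is shared through ath10k_wmi_peer_assoc_check_arg().
 */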
static struct sk_buff *
ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
                             const struct wmi_peer_assoc_complete_arg *arg)
{
        size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
        struct sk_buff *skb;
        int ret;

        ret = ath10k_wmi_peer_assoc_check_arg(arg);
        if (ret)
                return ERR_PTR(ret);

        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi peer assoc vdev %d addr %pM (%s)\n",
                   arg->vdev_id, arg->addr,
                   arg->peer_reassoc ? "reassociate" : "new");
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
                                  const struct wmi_peer_assoc_complete_arg *arg)
{
        size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
        struct sk_buff *skb;
        int ret;

        ret = ath10k_wmi_peer_assoc_check_arg(arg);
        if (ret)
                return ERR_PTR(ret);

        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi peer assoc vdev %d addr %pM (%s)\n",
                   arg->vdev_id, arg->addr,
                   arg->peer_reassoc ? "reassociate" : "new");
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
                                  const struct wmi_peer_assoc_complete_arg *arg)
{
        size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
        struct sk_buff *skb;
        int ret;

        ret = ath10k_wmi_peer_assoc_check_arg(arg);
        if (ret)
                return ERR_PTR(ret);

        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi peer assoc vdev %d addr %pM (%s)\n",
                   arg->vdev_id, arg->addr,
                   arg->peer_reassoc ? "reassociate" : "new");
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
                                  const struct wmi_peer_assoc_complete_arg *arg)
{
        size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
        struct sk_buff *skb;
        int ret;

        ret = ath10k_wmi_peer_assoc_check_arg(arg);
        if (ret)
                return ERR_PTR(ret);

        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi peer assoc vdev %d addr %pM (%s)\n",
                   arg->vdev_id, arg->addr,
                   arg->peer_reassoc ? "reassociate" : "new");
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, 0);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
                                          enum wmi_bss_survey_req_type type)
{
        struct wmi_pdev_chan_info_req_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
        cmd->type = __cpu_to_le32(type);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi pdev bss info request type %d\n", type);

        return skb;
}

/* This function assumes the beacon is already DMA mapped */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
                             size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
                             bool deliver_cab)
{
        struct wmi_bcn_tx_ref_cmd *cmd;
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        u16 fc;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        hdr = (struct ieee80211_hdr *)bcn;
        fc = le16_to_cpu(hdr->frame_control);

        cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        cmd->data_len = __cpu_to_le32(bcn_len);
        cmd->data_ptr = __cpu_to_le32(bcn_paddr);
        cmd->msdu_id = 0;
        cmd->frame_control = __cpu_to_le32(fc);
        cmd->flags = 0;
        cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);

        if (dtim_zero)
                cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

        if (deliver_cab)
                cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

        return skb;
}

void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
                              const struct wmi_wmm_params_arg *arg)
{
        params->cwmin = __cpu_to_le32(arg->cwmin);
        params->cwmax = __cpu_to_le32(arg->cwmax);
        params->aifs = __cpu_to_le32(arg->aifs);
        params->txop = __cpu_to_le32(arg->txop);
        params->acm = __cpu_to_le32(arg->acm);
        params->no_ack = __cpu_to_le32(arg->no_ack);
}

static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
                               const struct wmi_wmm_params_all_arg *arg)
{
        struct wmi_pdev_set_wmm_params *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
        ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
        ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
        ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
        ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
{
        struct wmi_request_stats_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_request_stats_cmd *)skb->data;
        cmd->stats_id = __cpu_to_le32(stats_mask);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
                   stats_mask);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
                                enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct wmi_force_fw_hang_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
        cmd->type = __cpu_to_le32(type);
        cmd->delay_ms = __cpu_to_le32(delay_ms);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
                   type, delay_ms);
        return skb;
}

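/* A zero module_enable restores the firmware dbglog defaults: all modules
 * are re-enabled at WARN level.
 */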
static struct sk_buff *
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
                             u32 log_level)
{
        struct wmi_dbglog_cfg_cmd *cmd;
        struct sk_buff *skb;
        u32 cfg;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;

        if (module_enable) {
                cfg = SM(log_level,
                         ATH10K_DBGLOG_CFG_LOG_LVL);
        } else {
                /* set back defaults, all modules with WARN level */
                cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
                         ATH10K_DBGLOG_CFG_LOG_LVL);
                module_enable = ~0;
        }

        cmd->module_enable = __cpu_to_le32(module_enable);
        cmd->module_valid = __cpu_to_le32(~0);
        cmd->config_enable = __cpu_to_le32(cfg);
        cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
                   __le32_to_cpu(cmd->module_enable),
                   __le32_to_cpu(cmd->module_valid),
                   __le32_to_cpu(cmd->config_enable),
                   __le32_to_cpu(cmd->config_valid));
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
                                  u32 log_level)
{
        struct wmi_10_4_dbglog_cfg_cmd *cmd;
        struct sk_buff *skb;
        u32 cfg;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;

        if (module_enable) {
                cfg = SM(log_level,
                         ATH10K_DBGLOG_CFG_LOG_LVL);
        } else {
                /* set back defaults, all modules with WARN level */
                cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
                         ATH10K_DBGLOG_CFG_LOG_LVL);
                module_enable = ~0;
        }

        cmd->module_enable = __cpu_to_le64(module_enable);
        cmd->module_valid = __cpu_to_le64(~0);
        cmd->config_enable = __cpu_to_le32(cfg);
        cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
                   __le64_to_cpu(cmd->module_enable),
                   __le64_to_cpu(cmd->module_valid),
                   __le32_to_cpu(cmd->config_enable),
                   __le32_to_cpu(cmd->config_valid));
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
        struct wmi_pdev_pktlog_enable_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ev_bitmap &= ATH10K_PKTLOG_ANY;

        cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
        cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
                   ev_bitmap);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, 0);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
        return skb;
}

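/* Program the firmware quiet-time window: 'enabled' switches the periodic
 * quiet interval on or off and 'next_offset' gives the offset to the start
 * of the next quiet period.
 */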
static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
                                      u32 duration, u32 next_offset,
                                      u32 enabled)
{
        struct wmi_pdev_set_quiet_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
        cmd->period = __cpu_to_le32(period);
        cmd->duration = __cpu_to_le32(duration);
        cmd->next_start = __cpu_to_le32(next_offset);
        cmd->enabled = __cpu_to_le32(enabled);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi quiet param: period %u duration %u enabled %d\n",
                   period, duration, enabled);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
                                   const u8 *mac)
{
        struct wmi_addba_clear_resp_cmd *cmd;
        struct sk_buff *skb;

        if (!mac)
                return ERR_PTR(-EINVAL);

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        ether_addr_copy(cmd->peer_macaddr.addr, mac);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
                   vdev_id, mac);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                             u32 tid, u32 buf_size)
{
        struct wmi_addba_send_cmd *cmd;
        struct sk_buff *skb;

        if (!mac)
                return ERR_PTR(-EINVAL);

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_addba_send_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        ether_addr_copy(cmd->peer_macaddr.addr, mac);
        cmd->tid = __cpu_to_le32(tid);
        cmd->buffersize = __cpu_to_le32(buf_size);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
                   vdev_id, mac, tid, buf_size);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                                 u32 tid, u32 status)
{
        struct wmi_addba_setresponse_cmd *cmd;
        struct sk_buff *skb;

        if (!mac)
                return ERR_PTR(-EINVAL);

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        ether_addr_copy(cmd->peer_macaddr.addr, mac);
        cmd->tid = __cpu_to_le32(tid);
        cmd->statuscode = __cpu_to_le32(status);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
                   vdev_id, mac, tid, status);
        return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                             u32 tid, u32 initiator, u32 reason)
{
        struct wmi_delba_send_cmd *cmd;
        struct sk_buff *skb;

        if (!mac)
                return ERR_PTR(-EINVAL);

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_delba_send_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        ether_addr_copy(cmd->peer_macaddr.addr, mac);
        cmd->tid = __cpu_to_le32(tid);
        cmd->initiator = __cpu_to_le32(initiator);
        cmd->reasoncode = __cpu_to_le32(reason);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
                   vdev_id, mac, tid, initiator, reason);
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
        struct wmi_pdev_get_tpc_config_cmd *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
        cmd->param = __cpu_to_le32(param);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi pdev get tpc config param %d\n", param);
        return skb;
}

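/* The *_stats_fill() helpers below append formatted lines to the debugfs
 * firmware-stats buffer; each write is bounded by ATH10K_FW_STATS_BUF_SIZE
 * and the running length is passed back through *length.
 */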
static void
ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
                                   char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
                         "ath10k PDEV stats");
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Channel noise floor", pdev->ch_noise_floor);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "Channel TX power", pdev->chan_tx_power);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "TX frame count", pdev->tx_frame_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "RX frame count", pdev->rx_frame_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "RX clear count", pdev->rx_clear_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "Cycle count", pdev->cycle_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "PHY error count", pdev->phy_err_count);

        *length = len;
}

static void
ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
                                    char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "RTS bad count", pdev->rts_bad);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "RTS good count", pdev->rts_good);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "FCS bad count", pdev->fcs_bad);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "No beacon count", pdev->no_beacons);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "MIB int count", pdev->mib_int_count);

        len += scnprintf(buf + len, buf_len - len, "\n");
        *length = len;
}

static void
ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
                                 char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
                         "ath10k PDEV TX stats");
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HTT cookies queued", pdev->comp_queued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HTT cookies disp.", pdev->comp_delivered);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MSDU queued", pdev->msdu_enqued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDU queued", pdev->mpdu_enqued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MSDUs dropped", pdev->wmm_drop);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Local enqued", pdev->local_enqued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Local freed", pdev->local_freed);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HW queued", pdev->hw_queued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PPDUs reaped", pdev->hw_reaped);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Num underruns", pdev->underrun);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PPDUs cleaned", pdev->tx_abort);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs requeued", pdev->mpdus_requeued);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Excessive retries", pdev->tx_ko);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HW rate", pdev->data_rc);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Sched self triggers", pdev->self_triggers);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Dropped due to SW retries",
                         pdev->sw_retry_failure);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Illegal rate phy errors",
                         pdev->illgl_rate_phy_err);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Pdev continuous xretry", pdev->pdev_cont_xretry);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "TX timeout", pdev->pdev_tx_timeout);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PDEV resets", pdev->pdev_resets);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PHY underrun", pdev->phy_underrun);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDU is more than txop limit", pdev->txop_ovf);
        *length = len;
}

static void
ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
                                 char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
                         "ath10k PDEV RX stats");
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Mid PPDU route change",
                         pdev->mid_ppdu_route_change);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Tot. number of statuses", pdev->status_rcvd);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Extra frags on rings 0", pdev->r0_frags);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Extra frags on rings 1", pdev->r1_frags);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Extra frags on rings 2", pdev->r2_frags);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Extra frags on rings 3", pdev->r3_frags);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MSDUs delivered to HTT", pdev->htt_msdus);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs delivered to HTT", pdev->htt_mpdus);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MSDUs delivered to stack", pdev->loc_msdus);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs delivered to stack", pdev->loc_mpdus);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Oversized AMSDUs", pdev->oversize_amsdu);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PHY errors", pdev->phy_errs);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "PHY errors drops", pdev->phy_err_drop);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
        *length = len;
}

static void
ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
                              char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
        int i;

        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "vdev id", vdev->vdev_id);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "beacon snr", vdev->beacon_snr);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "data snr", vdev->data_snr);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num rx frames", vdev->num_rx_frames);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num rts fail", vdev->num_rts_fail);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num rts success", vdev->num_rts_success);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num rx err", vdev->num_rx_err);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num rx discard", vdev->num_rx_discard);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "num tx not acked", vdev->num_tx_not_acked);

        for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
                len += scnprintf(buf + len, buf_len - len,
                                 "%25s [%02d] %u\n",
                                 "num tx frames", i,
                                 vdev->num_tx_frames[i]);

        for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
                len += scnprintf(buf + len, buf_len - len,
                                 "%25s [%02d] %u\n",
                                 "num tx frames retries", i,
                                 vdev->num_tx_frames_retries[i]);

        for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
                len += scnprintf(buf + len, buf_len - len,
                                 "%25s [%02d] %u\n",
                                 "num tx frames failures", i,
                                 vdev->num_tx_frames_failures[i]);

        for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
                len += scnprintf(buf + len, buf_len - len,
                                 "%25s [%02d] 0x%08x\n",
                                 "tx rate history", i,
                                 vdev->tx_rate_history[i]);

        for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
                len += scnprintf(buf + len, buf_len - len,
                                 "%25s [%02d] %u\n",
                                 "beacon rssi history", i,
                                 vdev->beacon_rssi_history[i]);

        len += scnprintf(buf + len, buf_len - len, "\n");
        *length = len;
}

static void
ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
                              char *buf, u32 *length, bool extended_peer)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
                         "Peer MAC address", peer->peer_macaddr);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "Peer RSSI", peer->peer_rssi);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "Peer TX rate", peer->peer_tx_rate);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "Peer RX rate", peer->peer_rx_rate);
        if (!extended_peer)
                len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
                                 "Peer RX duration", peer->rx_duration);

        len += scnprintf(buf + len, buf_len - len, "\n");
        *length = len;
}

static void
ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
                                   char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;

        len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
                         "Peer MAC address", peer->peer_macaddr);
        len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
                         "Peer RX duration", peer->rx_duration);
}

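/* Render the complete firmware stats report for the main firmware branch.
 * ar->data_lock is held while the pdev/vdev/peer lists are walked, and the
 * output buffer is always NUL terminated, even on truncation.
 */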
void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
                                      struct ath10k_fw_stats *fw_stats,
                                      char *buf)
{
        u32 len = 0;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
        const struct ath10k_fw_stats_pdev *pdev;
        const struct ath10k_fw_stats_vdev *vdev;
        const struct ath10k_fw_stats_peer *peer;
        size_t num_peers;
        size_t num_vdevs;

        spin_lock_bh(&ar->data_lock);

        pdev = list_first_entry_or_null(&fw_stats->pdevs,
                                        struct ath10k_fw_stats_pdev, list);
        if (!pdev) {
                ath10k_warn(ar, "failed to get pdev stats\n");
                goto unlock;
        }

        num_peers = list_count_nodes(&fw_stats->peers);
        num_vdevs = list_count_nodes(&fw_stats->vdevs);

        ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k VDEV stats", num_vdevs);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        list_for_each_entry(vdev, &fw_stats->vdevs, list) {
                ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
        }

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k PEER stats", num_peers);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        list_for_each_entry(peer, &fw_stats->peers, list) {
                ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
                                              fw_stats->extended);
        }

unlock:
        spin_unlock_bh(&ar->data_lock);

        if (len >= buf_len)
                buf[len - 1] = 0;
        else
                buf[len] = 0;
}

void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
                                     struct ath10k_fw_stats *fw_stats,
                                     char *buf)
{
        unsigned int len = 0;
        unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
        const struct ath10k_fw_stats_pdev *pdev;
        const struct ath10k_fw_stats_vdev *vdev;
        const struct ath10k_fw_stats_peer *peer;
        size_t num_peers;
        size_t num_vdevs;

        spin_lock_bh(&ar->data_lock);

        pdev = list_first_entry_or_null(&fw_stats->pdevs,
                                        struct ath10k_fw_stats_pdev, list);
        if (!pdev) {
                ath10k_warn(ar, "failed to get pdev stats\n");
                goto unlock;
        }

        num_peers = list_count_nodes(&fw_stats->peers);
        num_vdevs = list_count_nodes(&fw_stats->vdevs);

        ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k VDEV stats", num_vdevs);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        list_for_each_entry(vdev, &fw_stats->vdevs, list) {
                ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
        }

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k PEER stats", num_peers);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        list_for_each_entry(peer, &fw_stats->peers, list) {
                ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
                                              fw_stats->extended);
        }

unlock:
        spin_unlock_bh(&ar->data_lock);

        if (len >= buf_len)
                buf[len - 1] = 0;
        else
                buf[len] = 0;
}

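/* Enable or disable adaptive CCA and program its detection level and margin. */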
static struct sk_buff *
ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
                                           u32 detect_level, u32 detect_margin)
{
        struct wmi_pdev_set_adaptive_cca_params *cmd;
        struct sk_buff *skb;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
        cmd->enable = __cpu_to_le32(enable);
        cmd->cca_detect_level = __cpu_to_le32(detect_level);
        cmd->cca_detect_margin = __cpu_to_le32(detect_margin);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
                   enable, detect_level, detect_margin);
        return skb;
}

static void
ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
                                   char *buf, u32 *length)
{
        u32 len = *length;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
        u32 val;

        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "vdev id", vdev->vdev_id);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "ppdu aggr count", vdev->ppdu_aggr_cnt);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "ppdu noack", vdev->ppdu_noack);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "mpdu queued", vdev->mpdu_queued);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "mpdu sw requeued", vdev->mpdu_sw_requeued);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "mpdu success retry", vdev->mpdu_suc_retry);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "mpdu success multitry", vdev->mpdu_suc_multitry);
        len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                         "mpdu fail retry", vdev->mpdu_fail_retry);
        val = vdev->tx_ftm_suc;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "tx ftm success",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->tx_ftm_suc_retry;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "tx ftm success retry",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->tx_ftm_fail;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "tx ftm fail",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->rx_ftmr_cnt;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "rx ftm request count",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->rx_ftmr_dup_cnt;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "rx ftm request dup count",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->rx_iftmr_cnt;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "rx initial ftm req count",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        val = vdev->rx_iftmr_dup_cnt;
        if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "rx initial ftm req dup cnt",
                                 MS(val, WMI_VDEV_STATS_FTM_COUNT));
        len += scnprintf(buf + len, buf_len - len, "\n");

        *length = len;
}

void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
                                      struct ath10k_fw_stats *fw_stats,
                                      char *buf)
{
        u32 len = 0;
        u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
        const struct ath10k_fw_stats_pdev *pdev;
        const struct ath10k_fw_stats_vdev_extd *vdev;
        const struct ath10k_fw_stats_peer *peer;
        const struct ath10k_fw_extd_stats_peer *extd_peer;
        size_t num_peers;
        size_t num_vdevs;

        spin_lock_bh(&ar->data_lock);

        pdev = list_first_entry_or_null(&fw_stats->pdevs,
                                        struct ath10k_fw_stats_pdev, list);
        if (!pdev) {
                ath10k_warn(ar, "failed to get pdev stats\n");
                goto unlock;
        }

        num_peers = list_count_nodes(&fw_stats->peers);
        num_vdevs = list_count_nodes(&fw_stats->vdevs);

        ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
        ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);

        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "HW paused", pdev->hw_paused);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Seqs posted", pdev->seq_posted);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Seqs failed queueing", pdev->seq_failed_queueing);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Seqs completed", pdev->seq_completed);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Seqs restarted", pdev->seq_restarted);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MU Seqs posted", pdev->mu_seq_posted);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs SW flushed", pdev->mpdus_sw_flush);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs HW filtered", pdev->mpdus_hw_filter);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs truncated", pdev->mpdus_truncated);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs receive no ACK", pdev->mpdus_ack_failed);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "MPDUs expired", pdev->mpdus_expired);

        ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
        len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
                         "Num Rx Overflow errors", pdev->rx_ovfl_errs);

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k VDEV stats", num_vdevs);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");
        list_for_each_entry(vdev, &fw_stats->vdevs, list) {
                ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
        }

        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
                         "ath10k PEER stats", num_peers);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                         "=================");

        list_for_each_entry(peer, &fw_stats->peers, list) {
                ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
                                              fw_stats->extended);
        }

        if (fw_stats->extended) {
                list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
                        ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
                                                           &len);
                }
        }

unlock:
        spin_unlock_bh(&ar->data_lock);

        if (len >= buf_len)
                buf[len - 1] = 0;
        else
                buf[len] = 0;
}

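/* Map the generic wmi_vdev_subtype values onto the encoding used by each
 * firmware branch; unsupported subtypes yield -EOPNOTSUPP.
 */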
int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
                                   enum wmi_vdev_subtype subtype)
{
        switch (subtype) {
        case WMI_VDEV_SUBTYPE_NONE:
                return WMI_VDEV_SUBTYPE_LEGACY_NONE;
        case WMI_VDEV_SUBTYPE_P2P_DEVICE:
                return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
        case WMI_VDEV_SUBTYPE_P2P_CLIENT:
                return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
        case WMI_VDEV_SUBTYPE_P2P_GO:
                return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
        case WMI_VDEV_SUBTYPE_PROXY_STA:
                return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
        case WMI_VDEV_SUBTYPE_MESH_11S:
        case WMI_VDEV_SUBTYPE_MESH_NON_11S:
                return -EOPNOTSUPP;
        }
        return -EOPNOTSUPP;
}

static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
                                                 enum wmi_vdev_subtype subtype)
{
        switch (subtype) {
        case WMI_VDEV_SUBTYPE_NONE:
                return WMI_VDEV_SUBTYPE_10_2_4_NONE;
        case WMI_VDEV_SUBTYPE_P2P_DEVICE:
                return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
        case WMI_VDEV_SUBTYPE_P2P_CLIENT:
                return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
        case WMI_VDEV_SUBTYPE_P2P_GO:
                return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
        case WMI_VDEV_SUBTYPE_PROXY_STA:
                return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
        case WMI_VDEV_SUBTYPE_MESH_11S:
                return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
        case WMI_VDEV_SUBTYPE_MESH_NON_11S:
                return -EOPNOTSUPP;
        }
        return -EOPNOTSUPP;
}

static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
                                               enum wmi_vdev_subtype subtype)
{
        switch (subtype) {
        case WMI_VDEV_SUBTYPE_NONE:
                return WMI_VDEV_SUBTYPE_10_4_NONE;
        case WMI_VDEV_SUBTYPE_P2P_DEVICE:
                return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
        case WMI_VDEV_SUBTYPE_P2P_CLIENT:
                return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
        case WMI_VDEV_SUBTYPE_P2P_GO:
                return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
        case WMI_VDEV_SUBTYPE_PROXY_STA:
                return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
        case WMI_VDEV_SUBTYPE_MESH_11S:
                return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
        case WMI_VDEV_SUBTYPE_MESH_NON_11S:
                return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
        }
        return -EOPNOTSUPP;
}

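/* Advertise the host platform type, firmware feature bitmap and TDLS/coex
 * resource limits to 10.4 firmware via the extended resource config command.
 */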
static struct sk_buff *
ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
                                    enum wmi_host_platform_type type,
                                    u32 fw_feature_bitmap)
{
        struct wmi_ext_resource_config_10_4_cmd *cmd;
        struct sk_buff *skb;
        u32 num_tdls_sleep_sta = 0;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
                num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;

        cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
        cmd->host_platform_config = __cpu_to_le32(type);
        cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
        cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
        cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
        cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
        cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
        cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
        cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
        cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
        cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
        cmd->max_tdls_concurrent_buffer_sta =
                __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
                   type, fw_feature_bitmap);
        return skb;
}

static struct sk_buff *
ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
                                         enum wmi_tdls_state state)
{
        struct wmi_10_4_tdls_set_state_cmd *cmd;
        struct sk_buff *skb;
        u32 options = 0;

        skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);

        if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
            state == WMI_TDLS_ENABLE_ACTIVE)
                state = WMI_TDLS_ENABLE_PASSIVE;

        if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
                options |= WMI_TDLS_BUFFER_STA_EN;

        cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
        cmd->state = __cpu_to_le32(state);
        cmd->notification_interval_ms = __cpu_to_le32(5000);
        cmd->tx_discovery_threshold = __cpu_to_le32(100);
        cmd->tx_teardown_threshold = __cpu_to_le32(5);
        cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
        cmd->rssi_delta = __cpu_to_le32(-20);
        cmd->tdls_options = __cpu_to_le32(options);
        cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
        cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
        cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
        cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
        cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
        cmd->teardown_notification_ms = __cpu_to_le32(10);
        cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
                   state, vdev_id);
        return skb;
}

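/* Pack the peer's U-APSD queue flags and service period into the TDLS peer
 * QoS word expected by firmware.
 */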
static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
{
	u32 peer_qos = 0;

	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;

	peer_qos |= SM(sp, WMI_TDLS_PEER_SP);

	return peer_qos;
}

static struct sk_buff *
ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct wmi_pdev_get_tpc_table_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
	cmd->param = __cpu_to_le32(param);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev get tpc table param:%d\n", param);
	return skb;
}

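/* Build the 10.4 TDLS peer update command. The command is variable
 * length: the peer capability block embeds one wmi_channel, so any
 * additional peer channels extend the skb beyond sizeof(*cmd).
 */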
static struct sk_buff *
ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
				     const struct wmi_tdls_peer_update_cmd_arg *arg,
				     const struct wmi_tdls_peer_capab_arg *cap,
				     const struct wmi_channel_arg *chan_arg)
{
	struct wmi_10_4_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capabilities *peer_cap;
	struct wmi_channel *chan;
	struct sk_buff *skb;
	u32 peer_qos;
	int len, chan_len;
	int i;

	/* tdls peer update cmd has a placeholder for one channel */
	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;

	len = sizeof(*cmd) + chan_len * sizeof(*chan);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);

	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
					       cap->peer_max_sp);

	peer_cap = &cmd->peer_capab;
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);

	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];

	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);

	for (i = 0; i < cap->peer_chan_len; i++) {
		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}

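/* Pack the host-detected radar pulse parameters (PRI, pulse width and
 * subchannel index bounds) into the radar-found command used for the
 * firmware radar confirmation exchange.
 */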
static struct sk_buff *
ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
				const struct ath10k_radar_found_info *arg)
{
	struct wmi_radar_found_info *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_radar_found_info *)skb->data;
	cmd->pri_min = __cpu_to_le32(arg->pri_min);
	cmd->pri_max = __cpu_to_le32(arg->pri_max);
	cmd->width_min = __cpu_to_le32(arg->width_min);
	cmd->width_max = __cpu_to_le32(arg->width_max);
	cmd->sidx_min = __cpu_to_le32(arg->sidx_min);
	cmd->sidx_max = __cpu_to_le32(arg->sidx_max);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
		   arg->pri_min, arg->pri_max, arg->width_min,
		   arg->width_max, arg->sidx_min, arg->sidx_max);
	return skb;
}

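/* Build the per-peer, per-TID configuration command (ack policy,
 * aggregation, rate control, retry count and RTS/CTS control for a
 * single TID of the given peer).
 */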
static struct sk_buff *
ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
					 const struct wmi_per_peer_per_tid_cfg_arg *arg)
{
	struct wmi_peer_per_tid_cfg_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	memset(skb->data, 0, sizeof(*cmd));

	cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
	cmd->tid = cpu_to_le32(arg->tid);
	cmd->ack_policy = cpu_to_le32(arg->ack_policy);
	cmd->aggr_control = cpu_to_le32(arg->aggr_control);
	cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
	cmd->retry_count = cpu_to_le32(arg->retry_count);
	cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
	cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
	cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
		   arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
		   arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
		   arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
	return skb;
}

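/* Build a WMI echo command carrying a 32-bit value that firmware
 * reflects back in the echo event.
 */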
static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
	struct wmi_echo_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_echo_cmd *)skb->data;
	cmd->value = cpu_to_le32(value);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi echo value 0x%08x\n", value);
	return skb;
}

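/* WMI barrier: send an echo with a magic cookie and wait for the
 * matching echo event to complete ar->wmi.barrier. Returns -ETIMEDOUT
 * if no reply arrives within ATH10K_WMI_BARRIER_TIMEOUT_HZ.
 */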
int
ath10k_wmi_barrier(struct ath10k *ar)
{
	int ret;
	int time_left;

	spin_lock_bh(&ar->data_lock);
	reinit_completion(&ar->wmi.barrier);
	spin_unlock_bh(&ar->data_lock);

	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
	if (ret) {
		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

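/* Build the 10.2.4 baseband timing configuration command carrying the
 * TX and XPA timing values supplied by the caller.
 */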
static struct sk_buff *
ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
				   const struct wmi_bb_timing_cfg_arg *arg)
{
	struct wmi_pdev_bb_timing_cfg_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
	cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
	cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
		   arg->bb_tx_timing, arg->bb_xpa_timing);
	return skb;
}

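/* Per-WMI-version op tables. Each table wires the generic wmi-ops hooks
 * to the command builders and event parsers for one firmware branch;
 * ath10k_wmi_attach() selects the table that matches the running
 * firmware's WMI op version.
 */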
static const struct wmi_ops wmi_ops = {
	.rx = ath10k_wmi_op_rx,
	.map_svc = wmi_main_svc_map,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_op_gen_init,
	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	/* .gen_pdev_get_temperature not implemented */
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_op_gen_echo,
	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,

	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
	/* .gen_pdev_enable_adaptive_cca not implemented */
};

static const struct wmi_ops wmi_10_1_ops = {
	.rx = ath10k_wmi_10_1_op_rx,
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_1_op_gen_init,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */

	/* shared with main branch */
	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_op_gen_echo,
	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
	/* .gen_pdev_enable_adaptive_cca not implemented */
};

static const struct wmi_ops wmi_10_2_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_echo = ath10k_wmi_op_gen_echo,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
	/* .gen_pdev_enable_adaptive_cca not implemented */
};

static const struct wmi_ops wmi_10_2_4_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_echo = ath10k_wmi_op_gen_echo,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
	.gen_pdev_enable_adaptive_cca =
		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
	.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};

static const struct wmi_ops wmi_10_4_ops = {
	.rx = ath10k_wmi_10_4_op_rx,
	.map_svc = wmi_10_4_svc_map,

	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_10_4_op_gen_init,
	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
	.gen_pdev_get_tpc_table_cmdid =
			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
	.gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,

	/* shared with 10.2 */
	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
	.gen_echo = ath10k_wmi_op_gen_echo,
	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
};

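/* Bind the WMI op table, command/parameter maps and cipher suites that
 * match the firmware's WMI op version, then initialise the completions,
 * workers and (when management TX by reference is supported) the
 * pending-TX IDR used by the WMI layer.
 */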
int ath10k_wmi_attach(struct ath10k *ar)
{
	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->wmi.ops = &wmi_10_4_ops;
		ar->wmi.cmd = &wmi_10_4_cmd_map;
		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
		ar->wmi.peer_param = &wmi_peer_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
		ar->wmi.ops = &wmi_10_2_4_ops;
		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
		ar->wmi.peer_param = &wmi_peer_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_2:
		ar->wmi.cmd = &wmi_10_2_cmd_map;
		ar->wmi.ops = &wmi_10_2_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_param = &wmi_peer_param_map;
		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
		ar->wmi.cmd = &wmi_10x_cmd_map;
		ar->wmi.ops = &wmi_10_1_ops;
		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
		ar->wmi.peer_param = &wmi_peer_param_map;
		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->wmi.cmd = &wmi_cmd_map;
		ar->wmi.ops = &wmi_ops;
		ar->wmi.vdev_param = &wmi_vdev_param_map;
		ar->wmi.pdev_param = &wmi_pdev_param_map;
		ar->wmi.peer_param = &wmi_peer_param_map;
		ar->wmi.peer_flags = &wmi_peer_flags_map;
		ar->wmi_key_cipher = wmi_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		ath10k_wmi_tlv_attach(ar);
		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		ath10k_err(ar, "unsupported WMI op version: %d\n",
			   ar->running_fw->fw_file.wmi_op_version);
		return -EINVAL;
	}

	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_completion(&ar->wmi.barrier);
	init_completion(&ar->wmi.radar_confirm);

	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
	INIT_WORK(&ar->radar_confirmation_work,
		  ath10k_radar_confirmation_work);

	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features)) {
		idr_init(&ar->wmi.mgmt_pending_tx);
	}

	return 0;
}

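/* Release the DMA-coherent host memory chunks that were allocated at
 * firmware's request.
 */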
void ath10k_wmi_free_host_mem(struct ath10k *ar)
{
	int i;

	/* free the host memory chunks requested by firmware */
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		dma_free_coherent(ar->dev,
				  ar->wmi.mem_chunks[i].len,
				  ar->wmi.mem_chunks[i].vaddr,
				  ar->wmi.mem_chunks[i].paddr);
	}

	ar->wmi.num_mem_chunks = 0;
}

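/* idr_for_each() callback: unmap and free one management frame that is
 * still pending in ar->wmi.mgmt_pending_tx when the WMI layer is torn
 * down.
 */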
static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
					       void *ctx)
{
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
	struct ath10k *ar = ctx;
	struct sk_buff *msdu;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "force cleanup mgmt msdu_id %u\n", msdu_id);

	msdu = pkt_addr->vaddr;
	dma_unmap_single(ar->dev, pkt_addr->paddr,
			 msdu->len, DMA_TO_DEVICE);
	ieee80211_free_txskb(ar->hw, msdu);
	kfree(pkt_addr);

	return 0;
}

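/* Tear down WMI state: free any management frames still pending in the
 * TX-by-reference IDR, destroy the IDR, cancel the service ready worker
 * and drop a cached service ready skb, if any.
 */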
void ath10k_wmi_detach(struct ath10k *ar)
{
	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features)) {
		spin_lock_bh(&ar->data_lock);
		idr_for_each(&ar->wmi.mgmt_pending_tx,
			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
		idr_destroy(&ar->wmi.mgmt_pending_tx);
		spin_unlock_bh(&ar->data_lock);
	}

	cancel_work_sync(&ar->svc_rdy_work);
	dev_kfree_skb(ar->svc_rdy_skb);
}