/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"

/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid =
WMI_VDEV_SET_KEEPALIVE_CMDID, 141 .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID, 142 .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID, 143 .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, 144 .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, 145 }; 146 147 /* 10.X WMI cmd track */ 148 static struct wmi_cmd_map wmi_10x_cmd_map = { 149 .init_cmdid = WMI_10X_INIT_CMDID, 150 .start_scan_cmdid = WMI_10X_START_SCAN_CMDID, 151 .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, 152 .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, 153 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, 154 .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, 155 .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, 156 .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, 157 .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, 158 .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, 159 .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, 160 .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, 161 .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, 162 .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, 163 .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID, 164 .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, 165 .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, 166 .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, 167 .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID, 168 .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID, 169 .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID, 170 .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID, 171 .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID, 172 .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID, 173 .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID, 174 .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID, 175 .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID, 176 .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID, 177 .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID, 178 .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID, 179 .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID, 180 .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID, 181 .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, 182 .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, 183 .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID, 184 .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID, 185 .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID, 186 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, 187 .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID, 188 .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID, 189 .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID, 190 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, 191 .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID, 192 .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID, 193 .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID, 194 .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID, 195 .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID, 196 .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID, 197 .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID, 198 .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID, 199 .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID, 200 .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID, 201 .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID, 202 .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE, 203 .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, 204 
.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD, 205 .roam_scan_rssi_change_threshold = 206 WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, 207 .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE, 208 .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE, 209 .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, 210 .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD, 211 .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO, 212 .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY, 213 .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE, 214 .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE, 215 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, 216 .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID, 217 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, 218 .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, 219 .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, 220 .wlan_profile_set_hist_intvl_cmdid = 221 WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, 222 .wlan_profile_get_profile_data_cmdid = 223 WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, 224 .wlan_profile_enable_profile_id_cmdid = 225 WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, 226 .wlan_profile_list_profile_id_cmdid = 227 WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, 228 .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID, 229 .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID, 230 .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID, 231 .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID, 232 .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, 233 .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, 234 .wow_enable_disable_wake_event_cmdid = 235 WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, 236 .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID, 237 .wow_hostwakeup_from_sleep_cmdid = 238 WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, 239 .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID, 240 .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID, 241 .vdev_spectral_scan_configure_cmdid = 242 WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, 243 .vdev_spectral_scan_enable_cmdid = 244 WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, 245 .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID, 246 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, 247 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, 248 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, 249 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, 250 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, 251 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, 252 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, 253 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, 254 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, 255 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, 256 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, 257 .echo_cmdid = WMI_10X_ECHO_CMDID, 258 .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID, 259 .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID, 260 .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID, 261 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, 262 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, 263 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, 264 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, 265 .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, 266 .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, 267 }; 268 269 /* MAIN WMI VDEV param map */ 270 static struct wmi_vdev_param_map wmi_vdev_param_map = { 271 .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD, 272 .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 273 .beacon_interval = 
WMI_VDEV_PARAM_BEACON_INTERVAL, 274 .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL, 275 .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE, 276 .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE, 277 .slot_time = WMI_VDEV_PARAM_SLOT_TIME, 278 .preamble = WMI_VDEV_PARAM_PREAMBLE, 279 .swba_time = WMI_VDEV_PARAM_SWBA_TIME, 280 .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD, 281 .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME, 282 .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL, 283 .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD, 284 .wmi_vdev_oc_scheduler_air_time_limit = 285 WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, 286 .wds = WMI_VDEV_PARAM_WDS, 287 .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW, 288 .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX, 289 .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT, 290 .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT, 291 .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM, 292 .chwidth = WMI_VDEV_PARAM_CHWIDTH, 293 .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET, 294 .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION, 295 .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT, 296 .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE, 297 .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE, 298 .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE, 299 .sgi = WMI_VDEV_PARAM_SGI, 300 .ldpc = WMI_VDEV_PARAM_LDPC, 301 .tx_stbc = WMI_VDEV_PARAM_TX_STBC, 302 .rx_stbc = WMI_VDEV_PARAM_RX_STBC, 303 .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD, 304 .def_keyid = WMI_VDEV_PARAM_DEF_KEYID, 305 .nss = WMI_VDEV_PARAM_NSS, 306 .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE, 307 .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE, 308 .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE, 309 .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE, 310 .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, 311 .ap_keepalive_min_idle_inactive_time_secs = 312 WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, 313 .ap_keepalive_max_idle_inactive_time_secs = 314 WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, 315 .ap_keepalive_max_unresponsive_time_secs = 316 WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, 317 .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS, 318 .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED, 319 .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS, 320 .txbf = WMI_VDEV_PARAM_TXBF, 321 .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE, 322 .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY, 323 .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, 324 .ap_detect_out_of_sync_sleeping_sta_time_secs = 325 WMI_VDEV_PARAM_UNSUPPORTED, 326 }; 327 328 /* 10.X WMI VDEV param map */ 329 static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { 330 .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD, 331 .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 332 .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL, 333 .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, 334 .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE, 335 .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE, 336 .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME, 337 .preamble = WMI_10X_VDEV_PARAM_PREAMBLE, 338 .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME, 339 .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD, 340 .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, 341 .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL, 342 .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD, 343 .wmi_vdev_oc_scheduler_air_time_limit = 344 WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, 345 
.wds = WMI_10X_VDEV_PARAM_WDS, 346 .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW, 347 .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, 348 .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, 349 .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, 350 .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM, 351 .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH, 352 .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET, 353 .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, 354 .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, 355 .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE, 356 .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE, 357 .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE, 358 .sgi = WMI_10X_VDEV_PARAM_SGI, 359 .ldpc = WMI_10X_VDEV_PARAM_LDPC, 360 .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC, 361 .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC, 362 .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, 363 .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID, 364 .nss = WMI_10X_VDEV_PARAM_NSS, 365 .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, 366 .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, 367 .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE, 368 .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE, 369 .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, 370 .ap_keepalive_min_idle_inactive_time_secs = 371 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, 372 .ap_keepalive_max_idle_inactive_time_secs = 373 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, 374 .ap_keepalive_max_unresponsive_time_secs = 375 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, 376 .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, 377 .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, 378 .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, 379 .txbf = WMI_VDEV_PARAM_UNSUPPORTED, 380 .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED, 381 .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED, 382 .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, 383 .ap_detect_out_of_sync_sleeping_sta_time_secs = 384 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, 385 }; 386 387 static struct wmi_pdev_param_map wmi_pdev_param_map = { 388 .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK, 389 .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK, 390 .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G, 391 .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G, 392 .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE, 393 .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE, 394 .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE, 395 .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, 396 .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE, 397 .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW, 398 .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, 399 .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH, 400 .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH, 401 .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, 402 .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE, 403 .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, 404 .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, 405 .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, 406 .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, 407 .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, 408 .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, 409 .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE, 410 .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, 411 .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE, 412 .dsleep_enable = 
WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
};

static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
.dcs = WMI_10X_PDEV_PARAM_DCS, 476 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, 477 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, 478 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, 479 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, 480 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, 481 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN, 482 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED, 483 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED, 484 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED, 485 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, 486 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, 487 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, 488 }; 489 490 /* firmware 10.2 specific mappings */ 491 static struct wmi_cmd_map wmi_10_2_cmd_map = { 492 .init_cmdid = WMI_10_2_INIT_CMDID, 493 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID, 494 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, 495 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, 496 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, 497 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, 498 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, 499 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, 500 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, 501 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, 502 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, 503 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, 504 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, 505 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, 506 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, 507 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, 508 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, 509 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID, 510 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID, 511 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID, 512 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID, 513 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID, 514 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID, 515 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID, 516 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID, 517 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID, 518 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID, 519 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID, 520 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID, 521 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID, 522 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID, 523 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, 524 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, 525 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID, 526 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID, 527 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID, 528 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, 529 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID, 530 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID, 531 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID, 532 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, 533 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID, 534 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID, 535 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID, 536 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID, 537 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID, 538 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID, 539 
.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID, 540 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID, 541 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID, 542 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID, 543 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID, 544 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE, 545 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, 546 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD, 547 .roam_scan_rssi_change_threshold = 548 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, 549 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE, 550 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, 551 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, 552 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD, 553 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO, 554 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, 555 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE, 556 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, 557 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, 558 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID, 559 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, 560 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, 561 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, 562 .wlan_profile_set_hist_intvl_cmdid = 563 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, 564 .wlan_profile_get_profile_data_cmdid = 565 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, 566 .wlan_profile_enable_profile_id_cmdid = 567 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, 568 .wlan_profile_list_profile_id_cmdid = 569 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, 570 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID, 571 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID, 572 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID, 573 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID, 574 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, 575 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, 576 .wow_enable_disable_wake_event_cmdid = 577 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, 578 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID, 579 .wow_hostwakeup_from_sleep_cmdid = 580 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, 581 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID, 582 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID, 583 .vdev_spectral_scan_configure_cmdid = 584 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, 585 .vdev_spectral_scan_enable_cmdid = 586 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, 587 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID, 588 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, 589 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, 590 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, 591 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, 592 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, 593 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, 594 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, 595 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, 596 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, 597 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, 598 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, 599 .echo_cmdid = WMI_10_2_ECHO_CMDID, 600 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID, 601 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID, 602 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID, 603 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, 604 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, 605 
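	/* Command IDs left as WMI_CMD_UNSUPPORTED are rejected by
	 * ath10k_wmi_cmd_send() with -EOPNOTSUPP before anything is queued
	 * to HTC. Callers reach the active map through ar->wmi.cmd, e.g.
	 * ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid). */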
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}

static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	int ret;

	lockdep_assert_held(&arvif->ar->data_lock);

	if (arvif->beacon == NULL)
		return;

	if (arvif->beacon_sent)
		return;

	ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
	if (ret)
		return;

	/* We need to retain the arvif->beacon reference for DMA unmapping and
	 * freeing the skbuff later. */
	arvif->beacon_sent = true;
}

static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}

static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn("wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}

int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
	int ret = 0;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *wmi_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int len;
	u32 buf_len = skb->len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return -EINVAL;

	len = sizeof(cmd->hdr) + skb->len;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		len += IEEE80211_CCMP_MIC_LEN;
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	len = round_up(len, 4);

	wmi_skb = ath10k_wmi_alloc_skb(len);
	if (!wmi_skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(cmd->buf, skb->data, skb->len);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);

	/* Send the management frame buffer to the target */
	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* TODO: report tx status to mac80211 - temporary just ACK */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, skb);

	return ret;
}

static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}

static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
	u8 rate_idx = 0;

	/* rate in Kbps */
	switch (rate) {
	case 1000:
		rate_idx = 0;
		break;
	case 2000:
		rate_idx = 1;
		break;
	case 5500:
		rate_idx = 2;
		break;
	case 11000:
		rate_idx = 3;
		break;
	case 6000:
		rate_idx = 4;
		break;
	case 9000:
		rate_idx = 5;
		break;
	case 12000:
		rate_idx = 6;
		break;
	case 18000:
		rate_idx = 7;
		break;
	case 24000:
		rate_idx = 8;
		break;
	case 36000:
		rate_idx = 9;
		break;
	case 48000:
		rate_idx = 10;
		break;
	case 54000:
		rate_idx = 11;
		break;
	default:
		break;
	}

	/* The 5 GHz rate table has no CCK entries, so shift the OFDM
	 * indices down by four (e.g. 24000 kbps: idx 8 becomes idx 4). */
	if (band == IEEE80211_BAND_5GHZ) {
		if (rate_idx > 3)
			/* Omit CCK rates */
			rate_idx -= 4;
		else
			rate_idx = 0;
	}

	return rate_idx;
}

static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_channel *ch;
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
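		/* The v2 event embeds the v1 header (hdr.v1), so the common
		 * fields below can be read through ev_hdr for either layout;
		 * only pull_len differs. */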
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx. */

	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;

	if (ch) {
		status->band = ch->band;

		if (phy_mode == MODE_11B &&
		    status->band == IEEE80211_BAND_5GHZ)
			ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
	} else {
		ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
		status->band = phy_mode_to_band(phy_mode);
	}

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set.
*/ 1089 if (ieee80211_has_protected(hdr->frame_control) && 1090 !ieee80211_is_auth(hdr->frame_control)) { 1091 status->flag |= RX_FLAG_DECRYPTED; 1092 1093 if (!ieee80211_is_action(hdr->frame_control) && 1094 !ieee80211_is_deauth(hdr->frame_control) && 1095 !ieee80211_is_disassoc(hdr->frame_control)) { 1096 status->flag |= RX_FLAG_IV_STRIPPED | 1097 RX_FLAG_MMIC_STRIPPED; 1098 hdr->frame_control = __cpu_to_le16(fc & 1099 ~IEEE80211_FCTL_PROTECTED); 1100 } 1101 } 1102 1103 ath10k_dbg(ATH10K_DBG_MGMT, 1104 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 1105 skb, skb->len, 1106 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 1107 1108 ath10k_dbg(ATH10K_DBG_MGMT, 1109 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 1110 status->freq, status->band, status->signal, 1111 status->rate_idx); 1112 1113 /* 1114 * packets from HTC come aligned to 4byte boundaries 1115 * because they can originally come in along with a trailer 1116 */ 1117 skb_trim(skb, buf_len); 1118 1119 ieee80211_rx(ar->hw, skb); 1120 return 0; 1121 } 1122 1123 static int freq_to_idx(struct ath10k *ar, int freq) 1124 { 1125 struct ieee80211_supported_band *sband; 1126 int band, ch, idx = 0; 1127 1128 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 1129 sband = ar->hw->wiphy->bands[band]; 1130 if (!sband) 1131 continue; 1132 1133 for (ch = 0; ch < sband->n_channels; ch++, idx++) 1134 if (sband->channels[ch].center_freq == freq) 1135 goto exit; 1136 } 1137 1138 exit: 1139 return idx; 1140 } 1141 1142 static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) 1143 { 1144 struct wmi_chan_info_event *ev; 1145 struct survey_info *survey; 1146 u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count; 1147 int idx; 1148 1149 ev = (struct wmi_chan_info_event *)skb->data; 1150 1151 err_code = __le32_to_cpu(ev->err_code); 1152 freq = __le32_to_cpu(ev->freq); 1153 cmd_flags = __le32_to_cpu(ev->cmd_flags); 1154 noise_floor = __le32_to_cpu(ev->noise_floor); 1155 rx_clear_count = __le32_to_cpu(ev->rx_clear_count); 1156 cycle_count = __le32_to_cpu(ev->cycle_count); 1157 1158 ath10k_dbg(ATH10K_DBG_WMI, 1159 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", 1160 err_code, freq, cmd_flags, noise_floor, rx_clear_count, 1161 cycle_count); 1162 1163 spin_lock_bh(&ar->data_lock); 1164 1165 if (!ar->scan.in_progress) { 1166 ath10k_warn("chan info event without a scan request?\n"); 1167 goto exit; 1168 } 1169 1170 idx = freq_to_idx(ar, freq); 1171 if (idx >= ARRAY_SIZE(ar->survey)) { 1172 ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", 1173 freq, idx); 1174 goto exit; 1175 } 1176 1177 if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { 1178 /* During scanning chan info is reported twice for each 1179 * visited channel. 
The reported cycle count is global 1180 * and per-channel cycle count must be calculated */ 1181 1182 cycle_count -= ar->survey_last_cycle_count; 1183 rx_clear_count -= ar->survey_last_rx_clear_count; 1184 1185 survey = &ar->survey[idx]; 1186 survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count); 1187 survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count); 1188 survey->noise = noise_floor; 1189 survey->filled = SURVEY_INFO_CHANNEL_TIME | 1190 SURVEY_INFO_CHANNEL_TIME_RX | 1191 SURVEY_INFO_NOISE_DBM; 1192 } 1193 1194 ar->survey_last_rx_clear_count = rx_clear_count; 1195 ar->survey_last_cycle_count = cycle_count; 1196 1197 exit: 1198 spin_unlock_bh(&ar->data_lock); 1199 } 1200 1201 static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 1202 { 1203 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); 1204 } 1205 1206 static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) 1207 { 1208 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", 1209 skb->len); 1210 1211 trace_ath10k_wmi_dbglog(skb->data, skb->len); 1212 1213 return 0; 1214 } 1215 1216 static void ath10k_wmi_event_update_stats(struct ath10k *ar, 1217 struct sk_buff *skb) 1218 { 1219 struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; 1220 1221 ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); 1222 1223 ath10k_debug_read_target_stats(ar, ev); 1224 } 1225 1226 static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, 1227 struct sk_buff *skb) 1228 { 1229 struct wmi_vdev_start_response_event *ev; 1230 1231 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); 1232 1233 ev = (struct wmi_vdev_start_response_event *)skb->data; 1234 1235 if (WARN_ON(__le32_to_cpu(ev->status))) 1236 return; 1237 1238 complete(&ar->vdev_setup_done); 1239 } 1240 1241 static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, 1242 struct sk_buff *skb) 1243 { 1244 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); 1245 complete(&ar->vdev_setup_done); 1246 } 1247 1248 static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, 1249 struct sk_buff *skb) 1250 { 1251 struct wmi_peer_sta_kickout_event *ev; 1252 struct ieee80211_sta *sta; 1253 1254 ev = (struct wmi_peer_sta_kickout_event *)skb->data; 1255 1256 ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", 1257 ev->peer_macaddr.addr); 1258 1259 rcu_read_lock(); 1260 1261 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); 1262 if (!sta) { 1263 ath10k_warn("Spurious quick kickout for STA %pM\n", 1264 ev->peer_macaddr.addr); 1265 goto exit; 1266 } 1267 1268 ieee80211_report_low_ack(sta, 10); 1269 1270 exit: 1271 rcu_read_unlock(); 1272 } 1273 1274 /* 1275 * FIXME 1276 * 1277 * We don't report to mac80211 sleep state of connected 1278 * stations. Due to this mac80211 can't fill in TIM IE 1279 * correctly. 1280 * 1281 * I know of no way of getting nullfunc frames that contain 1282 * sleep transition from connected stations - these do not 1283 * seem to be sent from the target to the host. There also 1284 * doesn't seem to be a dedicated event for that. So the 1285 * only way left to do this would be to read tim_bitmap 1286 * during SWBA. 1287 * 1288 * We could probably try using tim_bitmap from SWBA to tell 1289 * mac80211 which stations are asleep and which are not. The 1290 * problem here is calling mac80211 functions so many times 1291 * could take too long and make us miss the time to submit 1292 * the beacon to the target. 
1293 * 1294 * So as a workaround we try to extend the TIM IE if there 1295 * is unicast buffered for stations with aid > 7 and fill it 1296 * in ourselves. 1297 */ 1298 static void ath10k_wmi_update_tim(struct ath10k *ar, 1299 struct ath10k_vif *arvif, 1300 struct sk_buff *bcn, 1301 struct wmi_bcn_info *bcn_info) 1302 { 1303 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; 1304 struct ieee80211_tim_ie *tim; 1305 u8 *ies, *ie; 1306 u8 ie_len, pvm_len; 1307 1308 /* if next SWBA has no tim_changed the tim_bitmap is garbage. 1309 * we must copy the bitmap upon change and reuse it later */ 1310 if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) { 1311 int i; 1312 1313 BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != 1314 sizeof(bcn_info->tim_info.tim_bitmap)); 1315 1316 for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { 1317 __le32 t = bcn_info->tim_info.tim_bitmap[i / 4]; 1318 u32 v = __le32_to_cpu(t); 1319 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; 1320 } 1321 1322 /* FW reports either length 0 or 16 1323 * so we calculate this on our own */ 1324 arvif->u.ap.tim_len = 0; 1325 for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) 1326 if (arvif->u.ap.tim_bitmap[i]) 1327 arvif->u.ap.tim_len = i; 1328 1329 arvif->u.ap.tim_len++; 1330 } 1331 1332 ies = bcn->data; 1333 ies += ieee80211_hdrlen(hdr->frame_control); 1334 ies += 12; /* fixed parameters */ 1335 1336 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, 1337 (u8 *)skb_tail_pointer(bcn) - ies); 1338 if (!ie) { 1339 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1340 ath10k_warn("no tim ie found;\n"); 1341 return; 1342 } 1343 1344 tim = (void *)ie + 2; 1345 ie_len = ie[1]; 1346 pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ 1347 1348 if (pvm_len < arvif->u.ap.tim_len) { 1349 int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; 1350 int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); 1351 void *next_ie = ie + 2 + ie_len; 1352 1353 if (skb_put(bcn, expand_size)) { 1354 memmove(next_ie + expand_size, next_ie, move_size); 1355 1356 ie[1] += expand_size; 1357 ie_len += expand_size; 1358 pvm_len += expand_size; 1359 } else { 1360 ath10k_warn("tim expansion failed\n"); 1361 } 1362 } 1363 1364 if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { 1365 ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); 1366 return; 1367 } 1368 1369 tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); 1370 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); 1371 1372 if (tim->dtim_count == 0) { 1373 ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true; 1374 1375 if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1) 1376 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; 1377 } 1378 1379 ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", 1380 tim->dtim_count, tim->dtim_period, 1381 tim->bitmap_ctrl, pvm_len); 1382 } 1383 1384 static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len, 1385 struct wmi_p2p_noa_info *noa) 1386 { 1387 struct ieee80211_p2p_noa_attr *noa_attr; 1388 u8 ctwindow_oppps = noa->ctwindow_oppps; 1389 u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET; 1390 bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT); 1391 __le16 *noa_attr_len; 1392 u16 attr_len; 1393 u8 noa_descriptors = noa->num_descriptors; 1394 int i; 1395 1396 /* P2P IE */ 1397 data[0] = WLAN_EID_VENDOR_SPECIFIC; 1398 data[1] = len - 2; 1399 data[2] = (WLAN_OUI_WFA >> 16) & 0xff; 1400 data[3] = (WLAN_OUI_WFA >> 8) & 0xff; 1401 data[4] = (WLAN_OUI_WFA >> 0) & 0xff; 1402 data[5] = WLAN_OUI_TYPE_WFA_P2P; 
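	/* Bytes 0-5 above form the vendor-specific P2P IE header: element ID,
	 * IE length, the 3-byte WFA OUI and the P2P OUI type. The notice of
	 * absence attribute that follows starts at data[6]: a 1-byte
	 * attribute ID, a 2-byte little-endian attribute length, then the
	 * attribute body. */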
1403 1404 /* NOA ATTR */ 1405 data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; 1406 noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ 1407 noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; 1408 1409 noa_attr->index = noa->index; 1410 noa_attr->oppps_ctwindow = ctwindow; 1411 if (oppps) 1412 noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; 1413 1414 for (i = 0; i < noa_descriptors; i++) { 1415 noa_attr->desc[i].count = 1416 __le32_to_cpu(noa->descriptors[i].type_count); 1417 noa_attr->desc[i].duration = noa->descriptors[i].duration; 1418 noa_attr->desc[i].interval = noa->descriptors[i].interval; 1419 noa_attr->desc[i].start_time = noa->descriptors[i].start_time; 1420 } 1421 1422 attr_len = 2; /* index + oppps_ctwindow */ 1423 attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); 1424 *noa_attr_len = __cpu_to_le16(attr_len); 1425 } 1426 1427 static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) 1428 { 1429 u32 len = 0; 1430 u8 noa_descriptors = noa->num_descriptors; 1431 u8 opp_ps_info = noa->ctwindow_oppps; 1432 bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT); 1433 1434 1435 if (!noa_descriptors && !opps_enabled) 1436 return len; 1437 1438 len += 1 + 1 + 4; /* EID + len + OUI */ 1439 len += 1 + 2; /* noa attr + attr len */ 1440 len += 1 + 1; /* index + oppps_ctwindow */ 1441 len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); 1442 1443 return len; 1444 } 1445 1446 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, 1447 struct sk_buff *bcn, 1448 struct wmi_bcn_info *bcn_info) 1449 { 1450 struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info; 1451 u8 *new_data, *old_data = arvif->u.ap.noa_data; 1452 u32 new_len; 1453 1454 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 1455 return; 1456 1457 ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); 1458 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { 1459 new_len = ath10k_p2p_calc_noa_ie_len(noa); 1460 if (!new_len) 1461 goto cleanup; 1462 1463 new_data = kmalloc(new_len, GFP_ATOMIC); 1464 if (!new_data) 1465 goto cleanup; 1466 1467 ath10k_p2p_fill_noa_ie(new_data, new_len, noa); 1468 1469 spin_lock_bh(&ar->data_lock); 1470 arvif->u.ap.noa_data = new_data; 1471 arvif->u.ap.noa_len = new_len; 1472 spin_unlock_bh(&ar->data_lock); 1473 kfree(old_data); 1474 } 1475 1476 if (arvif->u.ap.noa_data) 1477 if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) 1478 memcpy(skb_put(bcn, arvif->u.ap.noa_len), 1479 arvif->u.ap.noa_data, 1480 arvif->u.ap.noa_len); 1481 return; 1482 1483 cleanup: 1484 spin_lock_bh(&ar->data_lock); 1485 arvif->u.ap.noa_data = NULL; 1486 arvif->u.ap.noa_len = 0; 1487 spin_unlock_bh(&ar->data_lock); 1488 kfree(old_data); 1489 } 1490 1491 1492 static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) 1493 { 1494 struct wmi_host_swba_event *ev; 1495 u32 map; 1496 int i = -1; 1497 struct wmi_bcn_info *bcn_info; 1498 struct ath10k_vif *arvif; 1499 struct sk_buff *bcn; 1500 int ret, vdev_id = 0; 1501 1502 ev = (struct wmi_host_swba_event *)skb->data; 1503 map = __le32_to_cpu(ev->vdev_map); 1504 1505 ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", 1506 ev->vdev_map); 1507 1508 for (; map; map >>= 1, vdev_id++) { 1509 if (!(map & 0x1)) 1510 continue; 1511 1512 i++; 1513 1514 if (i >= WMI_MAX_AP_VDEV) { 1515 ath10k_warn("swba has corrupted vdev map\n"); 1516 break; 1517 } 1518 1519 bcn_info = &ev->bcn_info[i]; 1520 1521 ath10k_dbg(ATH10K_DBG_MGMT, 1522 "mgmt event bcn_info %d tim_len %d mcast %d 
changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", 1523 i, 1524 __le32_to_cpu(bcn_info->tim_info.tim_len), 1525 __le32_to_cpu(bcn_info->tim_info.tim_mcast), 1526 __le32_to_cpu(bcn_info->tim_info.tim_changed), 1527 __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending), 1528 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]), 1529 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]), 1530 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]), 1531 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0])); 1532 1533 arvif = ath10k_get_arvif(ar, vdev_id); 1534 if (arvif == NULL) { 1535 ath10k_warn("no vif for vdev_id %d found\n", vdev_id); 1536 continue; 1537 } 1538 1539 /* There are no completions for beacons so wait for next SWBA 1540 * before telling mac80211 to decrement CSA counter 1541 * 1542 * Once CSA counter is completed stop sending beacons until 1543 * actual channel switch is done */ 1544 if (arvif->vif->csa_active && 1545 ieee80211_csa_is_complete(arvif->vif)) { 1546 ieee80211_csa_finish(arvif->vif); 1547 continue; 1548 } 1549 1550 bcn = ieee80211_beacon_get(ar->hw, arvif->vif); 1551 if (!bcn) { 1552 ath10k_warn("could not get mac80211 beacon\n"); 1553 continue; 1554 } 1555 1556 ath10k_tx_h_seq_no(arvif->vif, bcn); 1557 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); 1558 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); 1559 1560 spin_lock_bh(&ar->data_lock); 1561 1562 if (arvif->beacon) { 1563 if (!arvif->beacon_sent) 1564 ath10k_warn("SWBA overrun on vdev %d\n", 1565 arvif->vdev_id); 1566 1567 dma_unmap_single(arvif->ar->dev, 1568 ATH10K_SKB_CB(arvif->beacon)->paddr, 1569 arvif->beacon->len, DMA_TO_DEVICE); 1570 dev_kfree_skb_any(arvif->beacon); 1571 arvif->beacon = NULL; 1572 } 1573 1574 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, 1575 bcn->data, bcn->len, 1576 DMA_TO_DEVICE); 1577 ret = dma_mapping_error(arvif->ar->dev, 1578 ATH10K_SKB_CB(bcn)->paddr); 1579 if (ret) { 1580 ath10k_warn("failed to map beacon: %d\n", ret); 1581 dev_kfree_skb_any(bcn); 1582 goto skip; 1583 } 1584 1585 arvif->beacon = bcn; 1586 arvif->beacon_sent = false; 1587 1588 ath10k_wmi_tx_beacon_nowait(arvif); 1589 skip: 1590 spin_unlock_bh(&ar->data_lock); 1591 } 1592 } 1593 1594 static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, 1595 struct sk_buff *skb) 1596 { 1597 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); 1598 } 1599 1600 static void ath10k_dfs_radar_report(struct ath10k *ar, 1601 struct wmi_single_phyerr_rx_event *event, 1602 struct phyerr_radar_report *rr, 1603 u64 tsf) 1604 { 1605 u32 reg0, reg1, tsf32l; 1606 struct pulse_event pe; 1607 u64 tsf64; 1608 u8 rssi, width; 1609 1610 reg0 = __le32_to_cpu(rr->reg0); 1611 reg1 = __le32_to_cpu(rr->reg1); 1612 1613 ath10k_dbg(ATH10K_DBG_REGULATORY, 1614 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", 1615 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), 1616 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), 1617 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), 1618 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); 1619 ath10k_dbg(ATH10K_DBG_REGULATORY, 1620 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", 1621 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), 1622 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), 1623 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), 1624 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), 1625 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); 1626 ath10k_dbg(ATH10K_DBG_REGULATORY, 1627 "wmi phyerr radar report pulse_tsf_offset 
0x%X pulse_dur: %d\n", 1628 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), 1629 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); 1630 1631 if (!ar->dfs_detector) 1632 return; 1633 1634 /* report event to DFS pattern detector */ 1635 tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp); 1636 tsf64 = tsf & (~0xFFFFFFFFULL); 1637 tsf64 |= tsf32l; 1638 1639 width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); 1640 rssi = event->hdr.rssi_combined; 1641 1642 /* hardware store this as 8 bit signed value, 1643 * set to zero if negative number 1644 */ 1645 if (rssi & 0x80) 1646 rssi = 0; 1647 1648 pe.ts = tsf64; 1649 pe.freq = ar->hw->conf.chandef.chan->center_freq; 1650 pe.width = width; 1651 pe.rssi = rssi; 1652 1653 ath10k_dbg(ATH10K_DBG_REGULATORY, 1654 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", 1655 pe.freq, pe.width, pe.rssi, pe.ts); 1656 1657 ATH10K_DFS_STAT_INC(ar, pulses_detected); 1658 1659 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { 1660 ath10k_dbg(ATH10K_DBG_REGULATORY, 1661 "dfs no pulse pattern detected, yet\n"); 1662 return; 1663 } 1664 1665 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); 1666 ATH10K_DFS_STAT_INC(ar, radar_detected); 1667 1668 /* Control radar events reporting in debugfs file 1669 dfs_block_radar_events */ 1670 if (ar->dfs_block_radar_events) { 1671 ath10k_info("DFS Radar detected, but ignored as requested\n"); 1672 return; 1673 } 1674 1675 ieee80211_radar_detected(ar->hw); 1676 } 1677 1678 static int ath10k_dfs_fft_report(struct ath10k *ar, 1679 struct wmi_single_phyerr_rx_event *event, 1680 struct phyerr_fft_report *fftr, 1681 u64 tsf) 1682 { 1683 u32 reg0, reg1; 1684 u8 rssi, peak_mag; 1685 1686 reg0 = __le32_to_cpu(fftr->reg0); 1687 reg1 = __le32_to_cpu(fftr->reg1); 1688 rssi = event->hdr.rssi_combined; 1689 1690 ath10k_dbg(ATH10K_DBG_REGULATORY, 1691 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", 1692 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), 1693 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), 1694 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), 1695 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); 1696 ath10k_dbg(ATH10K_DBG_REGULATORY, 1697 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", 1698 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), 1699 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), 1700 MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG), 1701 MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB)); 1702 1703 peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); 1704 1705 /* false event detection */ 1706 if (rssi == DFS_RSSI_POSSIBLY_FALSE && 1707 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { 1708 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); 1709 ATH10K_DFS_STAT_INC(ar, pulses_discarded); 1710 return -EINVAL; 1711 } 1712 1713 return 0; 1714 } 1715 1716 static void ath10k_wmi_event_dfs(struct ath10k *ar, 1717 struct wmi_single_phyerr_rx_event *event, 1718 u64 tsf) 1719 { 1720 int buf_len, tlv_len, res, i = 0; 1721 struct phyerr_tlv *tlv; 1722 struct phyerr_radar_report *rr; 1723 struct phyerr_fft_report *fftr; 1724 u8 *tlv_buf; 1725 1726 buf_len = __le32_to_cpu(event->hdr.buf_len); 1727 ath10k_dbg(ATH10K_DBG_REGULATORY, 1728 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", 1729 event->hdr.phy_err_code, event->hdr.rssi_combined, 1730 __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); 1731 1732 /* Skip event if DFS disabled */ 1733 if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) 1734 return; 1735 1736 ATH10K_DFS_STAT_INC(ar, 
pulses_total); 1737 1738 while (i < buf_len) { 1739 if (i + sizeof(*tlv) > buf_len) { 1740 ath10k_warn("too short buf for tlv header (%d)\n", i); 1741 return; 1742 } 1743 1744 tlv = (struct phyerr_tlv *)&event->bufp[i]; 1745 tlv_len = __le16_to_cpu(tlv->len); 1746 tlv_buf = &event->bufp[i + sizeof(*tlv)]; 1747 ath10k_dbg(ATH10K_DBG_REGULATORY, 1748 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", 1749 tlv_len, tlv->tag, tlv->sig); 1750 1751 switch (tlv->tag) { 1752 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: 1753 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { 1754 ath10k_warn("too short radar pulse summary (%d)\n", 1755 i); 1756 return; 1757 } 1758 1759 rr = (struct phyerr_radar_report *)tlv_buf; 1760 ath10k_dfs_radar_report(ar, event, rr, tsf); 1761 break; 1762 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 1763 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { 1764 ath10k_warn("too short fft report (%d)\n", i); 1765 return; 1766 } 1767 1768 fftr = (struct phyerr_fft_report *)tlv_buf; 1769 res = ath10k_dfs_fft_report(ar, event, fftr, tsf); 1770 if (res) 1771 return; 1772 break; 1773 } 1774 1775 i += sizeof(*tlv) + tlv_len; 1776 } 1777 } 1778 1779 static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, 1780 struct wmi_single_phyerr_rx_event *event, 1781 u64 tsf) 1782 { 1783 int buf_len, tlv_len, res, i = 0; 1784 struct phyerr_tlv *tlv; 1785 u8 *tlv_buf; 1786 struct phyerr_fft_report *fftr; 1787 size_t fftr_len; 1788 1789 buf_len = __le32_to_cpu(event->hdr.buf_len); 1790 1791 while (i < buf_len) { 1792 if (i + sizeof(*tlv) > buf_len) { 1793 ath10k_warn("failed to parse phyerr tlv header at byte %d\n", 1794 i); 1795 return; 1796 } 1797 1798 tlv = (struct phyerr_tlv *)&event->bufp[i]; 1799 tlv_len = __le16_to_cpu(tlv->len); 1800 tlv_buf = &event->bufp[i + sizeof(*tlv)]; 1801 1802 if (i + sizeof(*tlv) + tlv_len > buf_len) { 1803 ath10k_warn("failed to parse phyerr tlv payload at byte %d\n", 1804 i); 1805 return; 1806 } 1807 1808 switch (tlv->tag) { 1809 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 1810 if (sizeof(*fftr) > tlv_len) { 1811 ath10k_warn("failed to parse fft report at byte %d\n", 1812 i); 1813 return; 1814 } 1815 1816 fftr_len = tlv_len - sizeof(*fftr); 1817 fftr = (struct phyerr_fft_report *)tlv_buf; 1818 res = ath10k_spectral_process_fft(ar, event, 1819 fftr, fftr_len, 1820 tsf); 1821 if (res < 0) { 1822 ath10k_warn("failed to process fft report: %d\n", 1823 res); 1824 return; 1825 } 1826 break; 1827 } 1828 1829 i += sizeof(*tlv) + tlv_len; 1830 } 1831 } 1832 1833 static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) 1834 { 1835 struct wmi_comb_phyerr_rx_event *comb_event; 1836 struct wmi_single_phyerr_rx_event *event; 1837 u32 count, i, buf_len, phy_err_code; 1838 u64 tsf; 1839 int left_len = skb->len; 1840 1841 ATH10K_DFS_STAT_INC(ar, phy_errors); 1842 1843 /* Check if combined event available */ 1844 if (left_len < sizeof(*comb_event)) { 1845 ath10k_warn("wmi phyerr combined event wrong len\n"); 1846 return; 1847 } 1848 1849 left_len -= sizeof(*comb_event); 1850 1851 /* Check number of included events */ 1852 comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data; 1853 count = __le32_to_cpu(comb_event->hdr.num_phyerr_events); 1854 1855 tsf = __le32_to_cpu(comb_event->hdr.tsf_u32); 1856 tsf <<= 32; 1857 tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); 1858 1859 ath10k_dbg(ATH10K_DBG_WMI, 1860 "wmi event phyerr count %d tsf64 0x%llX\n", 1861 count, tsf); 1862 1863 event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp; 1864 for (i = 0; i < count; 
i++) { 1865 /* Check if we can read event header */ 1866 if (left_len < sizeof(*event)) { 1867 ath10k_warn("single event (%d) wrong head len\n", i); 1868 return; 1869 } 1870 1871 left_len -= sizeof(*event); 1872 1873 buf_len = __le32_to_cpu(event->hdr.buf_len); 1874 phy_err_code = event->hdr.phy_err_code; 1875 1876 if (left_len < buf_len) { 1877 ath10k_warn("single event (%d) wrong buf len\n", i); 1878 return; 1879 } 1880 1881 left_len -= buf_len; 1882 1883 switch (phy_err_code) { 1884 case PHY_ERROR_RADAR: 1885 ath10k_wmi_event_dfs(ar, event, tsf); 1886 break; 1887 case PHY_ERROR_SPECTRAL_SCAN: 1888 ath10k_wmi_event_spectral_scan(ar, event, tsf); 1889 break; 1890 case PHY_ERROR_FALSE_RADAR_EXT: 1891 ath10k_wmi_event_dfs(ar, event, tsf); 1892 ath10k_wmi_event_spectral_scan(ar, event, tsf); 1893 break; 1894 default: 1895 break; 1896 } 1897 1898 event += sizeof(*event) + buf_len; 1899 } 1900 } 1901 1902 static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) 1903 { 1904 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); 1905 } 1906 1907 static void ath10k_wmi_event_profile_match(struct ath10k *ar, 1908 struct sk_buff *skb) 1909 { 1910 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 1911 } 1912 1913 static void ath10k_wmi_event_debug_print(struct ath10k *ar, 1914 struct sk_buff *skb) 1915 { 1916 char buf[101], c; 1917 int i; 1918 1919 for (i = 0; i < sizeof(buf) - 1; i++) { 1920 if (i >= skb->len) 1921 break; 1922 1923 c = skb->data[i]; 1924 1925 if (c == '\0') 1926 break; 1927 1928 if (isascii(c) && isprint(c)) 1929 buf[i] = c; 1930 else 1931 buf[i] = '.'; 1932 } 1933 1934 if (i == sizeof(buf) - 1) 1935 ath10k_warn("wmi debug print truncated: %d\n", skb->len); 1936 1937 /* for some reason the debug prints end with \n, remove that */ 1938 if (skb->data[i - 1] == '\n') 1939 i--; 1940 1941 /* the last byte is always reserved for the null character */ 1942 buf[i] = '\0'; 1943 1944 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); 1945 } 1946 1947 static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 1948 { 1949 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 1950 } 1951 1952 static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, 1953 struct sk_buff *skb) 1954 { 1955 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 1956 } 1957 1958 static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 1959 struct sk_buff *skb) 1960 { 1961 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 1962 } 1963 1964 static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 1965 struct sk_buff *skb) 1966 { 1967 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 1968 } 1969 1970 static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, 1971 struct sk_buff *skb) 1972 { 1973 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 1974 } 1975 1976 static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, 1977 struct sk_buff *skb) 1978 { 1979 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); 1980 } 1981 1982 static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, 1983 struct sk_buff *skb) 1984 { 1985 ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 1986 } 1987 1988 static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, 1989 struct sk_buff *skb) 1990 { 1991 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); 1992 } 1993 1994 static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, 1995 struct sk_buff 
*skb) 1996 { 1997 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 1998 } 1999 2000 static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 2001 struct sk_buff *skb) 2002 { 2003 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 2004 } 2005 2006 static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, 2007 struct sk_buff *skb) 2008 { 2009 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 2010 } 2011 2012 static void ath10k_wmi_event_delba_complete(struct ath10k *ar, 2013 struct sk_buff *skb) 2014 { 2015 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 2016 } 2017 2018 static void ath10k_wmi_event_addba_complete(struct ath10k *ar, 2019 struct sk_buff *skb) 2020 { 2021 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 2022 } 2023 2024 static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 2025 struct sk_buff *skb) 2026 { 2027 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 2028 } 2029 2030 static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, 2031 struct sk_buff *skb) 2032 { 2033 ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 2034 } 2035 2036 static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, 2037 struct sk_buff *skb) 2038 { 2039 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 2040 } 2041 2042 static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, 2043 struct sk_buff *skb) 2044 { 2045 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 2046 } 2047 2048 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, 2049 u32 num_units, u32 unit_len) 2050 { 2051 dma_addr_t paddr; 2052 u32 pool_size; 2053 int idx = ar->wmi.num_mem_chunks; 2054 2055 pool_size = num_units * round_up(unit_len, 4); 2056 2057 if (!pool_size) 2058 return -EINVAL; 2059 2060 ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, 2061 pool_size, 2062 &paddr, 2063 GFP_ATOMIC); 2064 if (!ar->wmi.mem_chunks[idx].vaddr) { 2065 ath10k_warn("failed to allocate memory chunk\n"); 2066 return -ENOMEM; 2067 } 2068 2069 memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); 2070 2071 ar->wmi.mem_chunks[idx].paddr = paddr; 2072 ar->wmi.mem_chunks[idx].len = pool_size; 2073 ar->wmi.mem_chunks[idx].req_id = req_id; 2074 ar->wmi.num_mem_chunks++; 2075 2076 return 0; 2077 } 2078 2079 static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, 2080 struct sk_buff *skb) 2081 { 2082 struct wmi_service_ready_event *ev = (void *)skb->data; 2083 2084 if (skb->len < sizeof(*ev)) { 2085 ath10k_warn("Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", 2086 skb->len, sizeof(*ev)); 2087 return; 2088 } 2089 2090 ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); 2091 ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); 2092 ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); 2093 ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); 2094 ar->fw_version_major = 2095 (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; 2096 ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); 2097 ar->fw_version_release = 2098 (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; 2099 ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); 2100 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 2101 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 2102 2103 /* only manually set fw features when not using FW IE format */ 2104 if (ar->fw_api == 1 && ar->fw_version_build > 636) 2105 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); 2106 2107 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2108 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2109 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2110 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2111 } 2112 2113 ar->ath_common.regulatory.current_rd = 2114 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 2115 2116 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 2117 sizeof(ev->wmi_service_bitmap)); 2118 2119 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2120 snprintf(ar->hw->wiphy->fw_version, 2121 sizeof(ar->hw->wiphy->fw_version), 2122 "%u.%u.%u.%u", 2123 ar->fw_version_major, 2124 ar->fw_version_minor, 2125 ar->fw_version_release, 2126 ar->fw_version_build); 2127 } 2128 2129 /* FIXME: it probably should be better to support this */ 2130 if (__le32_to_cpu(ev->num_mem_reqs) > 0) { 2131 ath10k_warn("target requested %d memory chunks; ignoring\n", 2132 __le32_to_cpu(ev->num_mem_reqs)); 2133 } 2134 2135 ath10k_dbg(ATH10K_DBG_WMI, 2136 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2137 __le32_to_cpu(ev->sw_version), 2138 __le32_to_cpu(ev->sw_version_1), 2139 __le32_to_cpu(ev->abi_version), 2140 __le32_to_cpu(ev->phy_capability), 2141 __le32_to_cpu(ev->ht_cap_info), 2142 __le32_to_cpu(ev->vht_cap_info), 2143 __le32_to_cpu(ev->vht_supp_mcs), 2144 __le32_to_cpu(ev->sys_cap_info), 2145 __le32_to_cpu(ev->num_mem_reqs), 2146 __le32_to_cpu(ev->num_rf_chains)); 2147 2148 complete(&ar->wmi.service_ready); 2149 } 2150 2151 static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, 2152 struct sk_buff *skb) 2153 { 2154 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; 2155 int ret; 2156 struct wmi_service_ready_event_10x *ev = (void *)skb->data; 2157 2158 if (skb->len < sizeof(*ev)) { 2159 ath10k_warn("Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", 2160 skb->len, sizeof(*ev)); 2161 return; 2162 } 2163 2164 ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); 2165 ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); 2166 ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); 2167 ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); 2168 ar->fw_version_major = 2169 (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; 2170 ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); 2171 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 2172 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 2173 2174 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2175 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2176 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2177 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2178 } 2179 2180 ar->ath_common.regulatory.current_rd = 2181 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 2182 2183 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 2184 sizeof(ev->wmi_service_bitmap)); 2185 2186 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2187 snprintf(ar->hw->wiphy->fw_version, 2188 sizeof(ar->hw->wiphy->fw_version), 2189 "%u.%u", 2190 ar->fw_version_major, 2191 ar->fw_version_minor); 2192 } 2193 2194 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); 2195 2196 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { 2197 ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", 2198 num_mem_reqs); 2199 return; 2200 } 2201 2202 if (!num_mem_reqs) 2203 goto exit; 2204 2205 ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", 2206 num_mem_reqs); 2207 2208 for (i = 0; i < num_mem_reqs; ++i) { 2209 req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); 2210 num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); 2211 unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); 2212 num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); 2213 2214 if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) 2215 /* number of units to allocate is number of 2216 * peers, 1 extra for self peer on target */ 2217 /* this needs to be tied, host and target 2218 * can get out of sync */ 2219 num_units = TARGET_10X_NUM_PEERS + 1; 2220 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) 2221 num_units = TARGET_10X_NUM_VDEVS + 1; 2222 2223 ath10k_dbg(ATH10K_DBG_WMI, 2224 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", 2225 req_id, 2226 __le32_to_cpu(ev->mem_reqs[i].num_units), 2227 num_unit_info, 2228 unit_size, 2229 num_units); 2230 2231 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, 2232 unit_size); 2233 if (ret) 2234 return; 2235 } 2236 2237 exit: 2238 ath10k_dbg(ATH10K_DBG_WMI, 2239 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2240 __le32_to_cpu(ev->sw_version), 2241 __le32_to_cpu(ev->abi_version), 2242 __le32_to_cpu(ev->phy_capability), 2243 __le32_to_cpu(ev->ht_cap_info), 2244 __le32_to_cpu(ev->vht_cap_info), 2245 __le32_to_cpu(ev->vht_supp_mcs), 2246 __le32_to_cpu(ev->sys_cap_info), 2247 __le32_to_cpu(ev->num_mem_reqs), 2248 __le32_to_cpu(ev->num_rf_chains)); 2249 2250 complete(&ar->wmi.service_ready); 2251 } 2252 2253 static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) 2254 { 2255 struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; 2256 2257 if (WARN_ON(skb->len < sizeof(*ev))) 2258 return -EINVAL; 2259 2260 memcpy(ar->mac_addr, 
ev->mac_addr.addr, ETH_ALEN); 2261 2262 ath10k_dbg(ATH10K_DBG_WMI, 2263 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", 2264 __le32_to_cpu(ev->sw_version), 2265 __le32_to_cpu(ev->abi_version), 2266 ev->mac_addr.addr, 2267 __le32_to_cpu(ev->status), skb->len, sizeof(*ev)); 2268 2269 complete(&ar->wmi.unified_ready); 2270 return 0; 2271 } 2272 2273 static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) 2274 { 2275 struct wmi_cmd_hdr *cmd_hdr; 2276 enum wmi_event_id id; 2277 2278 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 2279 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 2280 2281 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 2282 return; 2283 2284 trace_ath10k_wmi_event(id, skb->data, skb->len); 2285 2286 switch (id) { 2287 case WMI_MGMT_RX_EVENTID: 2288 ath10k_wmi_event_mgmt_rx(ar, skb); 2289 /* mgmt_rx() owns the skb now! */ 2290 return; 2291 case WMI_SCAN_EVENTID: 2292 ath10k_wmi_event_scan(ar, skb); 2293 break; 2294 case WMI_CHAN_INFO_EVENTID: 2295 ath10k_wmi_event_chan_info(ar, skb); 2296 break; 2297 case WMI_ECHO_EVENTID: 2298 ath10k_wmi_event_echo(ar, skb); 2299 break; 2300 case WMI_DEBUG_MESG_EVENTID: 2301 ath10k_wmi_event_debug_mesg(ar, skb); 2302 break; 2303 case WMI_UPDATE_STATS_EVENTID: 2304 ath10k_wmi_event_update_stats(ar, skb); 2305 break; 2306 case WMI_VDEV_START_RESP_EVENTID: 2307 ath10k_wmi_event_vdev_start_resp(ar, skb); 2308 break; 2309 case WMI_VDEV_STOPPED_EVENTID: 2310 ath10k_wmi_event_vdev_stopped(ar, skb); 2311 break; 2312 case WMI_PEER_STA_KICKOUT_EVENTID: 2313 ath10k_wmi_event_peer_sta_kickout(ar, skb); 2314 break; 2315 case WMI_HOST_SWBA_EVENTID: 2316 ath10k_wmi_event_host_swba(ar, skb); 2317 break; 2318 case WMI_TBTTOFFSET_UPDATE_EVENTID: 2319 ath10k_wmi_event_tbttoffset_update(ar, skb); 2320 break; 2321 case WMI_PHYERR_EVENTID: 2322 ath10k_wmi_event_phyerr(ar, skb); 2323 break; 2324 case WMI_ROAM_EVENTID: 2325 ath10k_wmi_event_roam(ar, skb); 2326 break; 2327 case WMI_PROFILE_MATCH: 2328 ath10k_wmi_event_profile_match(ar, skb); 2329 break; 2330 case WMI_DEBUG_PRINT_EVENTID: 2331 ath10k_wmi_event_debug_print(ar, skb); 2332 break; 2333 case WMI_PDEV_QVIT_EVENTID: 2334 ath10k_wmi_event_pdev_qvit(ar, skb); 2335 break; 2336 case WMI_WLAN_PROFILE_DATA_EVENTID: 2337 ath10k_wmi_event_wlan_profile_data(ar, skb); 2338 break; 2339 case WMI_RTT_MEASUREMENT_REPORT_EVENTID: 2340 ath10k_wmi_event_rtt_measurement_report(ar, skb); 2341 break; 2342 case WMI_TSF_MEASUREMENT_REPORT_EVENTID: 2343 ath10k_wmi_event_tsf_measurement_report(ar, skb); 2344 break; 2345 case WMI_RTT_ERROR_REPORT_EVENTID: 2346 ath10k_wmi_event_rtt_error_report(ar, skb); 2347 break; 2348 case WMI_WOW_WAKEUP_HOST_EVENTID: 2349 ath10k_wmi_event_wow_wakeup_host(ar, skb); 2350 break; 2351 case WMI_DCS_INTERFERENCE_EVENTID: 2352 ath10k_wmi_event_dcs_interference(ar, skb); 2353 break; 2354 case WMI_PDEV_TPC_CONFIG_EVENTID: 2355 ath10k_wmi_event_pdev_tpc_config(ar, skb); 2356 break; 2357 case WMI_PDEV_FTM_INTG_EVENTID: 2358 ath10k_wmi_event_pdev_ftm_intg(ar, skb); 2359 break; 2360 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 2361 ath10k_wmi_event_gtk_offload_status(ar, skb); 2362 break; 2363 case WMI_GTK_REKEY_FAIL_EVENTID: 2364 ath10k_wmi_event_gtk_rekey_fail(ar, skb); 2365 break; 2366 case WMI_TX_DELBA_COMPLETE_EVENTID: 2367 ath10k_wmi_event_delba_complete(ar, skb); 2368 break; 2369 case WMI_TX_ADDBA_COMPLETE_EVENTID: 2370 ath10k_wmi_event_addba_complete(ar, skb); 2371 break; 2372 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 2373 
ath10k_wmi_event_vdev_install_key_complete(ar, skb); 2374 break; 2375 case WMI_SERVICE_READY_EVENTID: 2376 ath10k_wmi_service_ready_event_rx(ar, skb); 2377 break; 2378 case WMI_READY_EVENTID: 2379 ath10k_wmi_ready_event_rx(ar, skb); 2380 break; 2381 default: 2382 ath10k_warn("Unknown eventid: %d\n", id); 2383 break; 2384 } 2385 2386 dev_kfree_skb(skb); 2387 } 2388 2389 static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) 2390 { 2391 struct wmi_cmd_hdr *cmd_hdr; 2392 enum wmi_10x_event_id id; 2393 2394 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 2395 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 2396 2397 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 2398 return; 2399 2400 trace_ath10k_wmi_event(id, skb->data, skb->len); 2401 2402 switch (id) { 2403 case WMI_10X_MGMT_RX_EVENTID: 2404 ath10k_wmi_event_mgmt_rx(ar, skb); 2405 /* mgmt_rx() owns the skb now! */ 2406 return; 2407 case WMI_10X_SCAN_EVENTID: 2408 ath10k_wmi_event_scan(ar, skb); 2409 break; 2410 case WMI_10X_CHAN_INFO_EVENTID: 2411 ath10k_wmi_event_chan_info(ar, skb); 2412 break; 2413 case WMI_10X_ECHO_EVENTID: 2414 ath10k_wmi_event_echo(ar, skb); 2415 break; 2416 case WMI_10X_DEBUG_MESG_EVENTID: 2417 ath10k_wmi_event_debug_mesg(ar, skb); 2418 break; 2419 case WMI_10X_UPDATE_STATS_EVENTID: 2420 ath10k_wmi_event_update_stats(ar, skb); 2421 break; 2422 case WMI_10X_VDEV_START_RESP_EVENTID: 2423 ath10k_wmi_event_vdev_start_resp(ar, skb); 2424 break; 2425 case WMI_10X_VDEV_STOPPED_EVENTID: 2426 ath10k_wmi_event_vdev_stopped(ar, skb); 2427 break; 2428 case WMI_10X_PEER_STA_KICKOUT_EVENTID: 2429 ath10k_wmi_event_peer_sta_kickout(ar, skb); 2430 break; 2431 case WMI_10X_HOST_SWBA_EVENTID: 2432 ath10k_wmi_event_host_swba(ar, skb); 2433 break; 2434 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: 2435 ath10k_wmi_event_tbttoffset_update(ar, skb); 2436 break; 2437 case WMI_10X_PHYERR_EVENTID: 2438 ath10k_wmi_event_phyerr(ar, skb); 2439 break; 2440 case WMI_10X_ROAM_EVENTID: 2441 ath10k_wmi_event_roam(ar, skb); 2442 break; 2443 case WMI_10X_PROFILE_MATCH: 2444 ath10k_wmi_event_profile_match(ar, skb); 2445 break; 2446 case WMI_10X_DEBUG_PRINT_EVENTID: 2447 ath10k_wmi_event_debug_print(ar, skb); 2448 break; 2449 case WMI_10X_PDEV_QVIT_EVENTID: 2450 ath10k_wmi_event_pdev_qvit(ar, skb); 2451 break; 2452 case WMI_10X_WLAN_PROFILE_DATA_EVENTID: 2453 ath10k_wmi_event_wlan_profile_data(ar, skb); 2454 break; 2455 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: 2456 ath10k_wmi_event_rtt_measurement_report(ar, skb); 2457 break; 2458 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: 2459 ath10k_wmi_event_tsf_measurement_report(ar, skb); 2460 break; 2461 case WMI_10X_RTT_ERROR_REPORT_EVENTID: 2462 ath10k_wmi_event_rtt_error_report(ar, skb); 2463 break; 2464 case WMI_10X_WOW_WAKEUP_HOST_EVENTID: 2465 ath10k_wmi_event_wow_wakeup_host(ar, skb); 2466 break; 2467 case WMI_10X_DCS_INTERFERENCE_EVENTID: 2468 ath10k_wmi_event_dcs_interference(ar, skb); 2469 break; 2470 case WMI_10X_PDEV_TPC_CONFIG_EVENTID: 2471 ath10k_wmi_event_pdev_tpc_config(ar, skb); 2472 break; 2473 case WMI_10X_INST_RSSI_STATS_EVENTID: 2474 ath10k_wmi_event_inst_rssi_stats(ar, skb); 2475 break; 2476 case WMI_10X_VDEV_STANDBY_REQ_EVENTID: 2477 ath10k_wmi_event_vdev_standby_req(ar, skb); 2478 break; 2479 case WMI_10X_VDEV_RESUME_REQ_EVENTID: 2480 ath10k_wmi_event_vdev_resume_req(ar, skb); 2481 break; 2482 case WMI_10X_SERVICE_READY_EVENTID: 2483 ath10k_wmi_10x_service_ready_event_rx(ar, skb); 2484 break; 2485 case WMI_10X_READY_EVENTID: 2486 
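		/* the 10.x READY event is parsed by the same handler used for
		 * main firmware */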
ath10k_wmi_ready_event_rx(ar, skb); 2487 break; 2488 default: 2489 ath10k_warn("Unknown eventid: %d\n", id); 2490 break; 2491 } 2492 2493 dev_kfree_skb(skb); 2494 } 2495 2496 static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb) 2497 { 2498 struct wmi_cmd_hdr *cmd_hdr; 2499 enum wmi_10_2_event_id id; 2500 2501 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 2502 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 2503 2504 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 2505 return; 2506 2507 trace_ath10k_wmi_event(id, skb->data, skb->len); 2508 2509 switch (id) { 2510 case WMI_10_2_MGMT_RX_EVENTID: 2511 ath10k_wmi_event_mgmt_rx(ar, skb); 2512 /* mgmt_rx() owns the skb now! */ 2513 return; 2514 case WMI_10_2_SCAN_EVENTID: 2515 ath10k_wmi_event_scan(ar, skb); 2516 break; 2517 case WMI_10_2_CHAN_INFO_EVENTID: 2518 ath10k_wmi_event_chan_info(ar, skb); 2519 break; 2520 case WMI_10_2_ECHO_EVENTID: 2521 ath10k_wmi_event_echo(ar, skb); 2522 break; 2523 case WMI_10_2_DEBUG_MESG_EVENTID: 2524 ath10k_wmi_event_debug_mesg(ar, skb); 2525 break; 2526 case WMI_10_2_UPDATE_STATS_EVENTID: 2527 ath10k_wmi_event_update_stats(ar, skb); 2528 break; 2529 case WMI_10_2_VDEV_START_RESP_EVENTID: 2530 ath10k_wmi_event_vdev_start_resp(ar, skb); 2531 break; 2532 case WMI_10_2_VDEV_STOPPED_EVENTID: 2533 ath10k_wmi_event_vdev_stopped(ar, skb); 2534 break; 2535 case WMI_10_2_PEER_STA_KICKOUT_EVENTID: 2536 ath10k_wmi_event_peer_sta_kickout(ar, skb); 2537 break; 2538 case WMI_10_2_HOST_SWBA_EVENTID: 2539 ath10k_wmi_event_host_swba(ar, skb); 2540 break; 2541 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID: 2542 ath10k_wmi_event_tbttoffset_update(ar, skb); 2543 break; 2544 case WMI_10_2_PHYERR_EVENTID: 2545 ath10k_wmi_event_phyerr(ar, skb); 2546 break; 2547 case WMI_10_2_ROAM_EVENTID: 2548 ath10k_wmi_event_roam(ar, skb); 2549 break; 2550 case WMI_10_2_PROFILE_MATCH: 2551 ath10k_wmi_event_profile_match(ar, skb); 2552 break; 2553 case WMI_10_2_DEBUG_PRINT_EVENTID: 2554 ath10k_wmi_event_debug_print(ar, skb); 2555 break; 2556 case WMI_10_2_PDEV_QVIT_EVENTID: 2557 ath10k_wmi_event_pdev_qvit(ar, skb); 2558 break; 2559 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID: 2560 ath10k_wmi_event_wlan_profile_data(ar, skb); 2561 break; 2562 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID: 2563 ath10k_wmi_event_rtt_measurement_report(ar, skb); 2564 break; 2565 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID: 2566 ath10k_wmi_event_tsf_measurement_report(ar, skb); 2567 break; 2568 case WMI_10_2_RTT_ERROR_REPORT_EVENTID: 2569 ath10k_wmi_event_rtt_error_report(ar, skb); 2570 break; 2571 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID: 2572 ath10k_wmi_event_wow_wakeup_host(ar, skb); 2573 break; 2574 case WMI_10_2_DCS_INTERFERENCE_EVENTID: 2575 ath10k_wmi_event_dcs_interference(ar, skb); 2576 break; 2577 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID: 2578 ath10k_wmi_event_pdev_tpc_config(ar, skb); 2579 break; 2580 case WMI_10_2_INST_RSSI_STATS_EVENTID: 2581 ath10k_wmi_event_inst_rssi_stats(ar, skb); 2582 break; 2583 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID: 2584 ath10k_wmi_event_vdev_standby_req(ar, skb); 2585 break; 2586 case WMI_10_2_VDEV_RESUME_REQ_EVENTID: 2587 ath10k_wmi_event_vdev_resume_req(ar, skb); 2588 break; 2589 case WMI_10_2_SERVICE_READY_EVENTID: 2590 ath10k_wmi_10x_service_ready_event_rx(ar, skb); 2591 break; 2592 case WMI_10_2_READY_EVENTID: 2593 ath10k_wmi_ready_event_rx(ar, skb); 2594 break; 2595 case WMI_10_2_RTT_KEEPALIVE_EVENTID: 2596 case WMI_10_2_GPIO_INPUT_EVENTID: 2597 case WMI_10_2_PEER_RATECODE_LIST_EVENTID: 2598 case 
WMI_10_2_GENERIC_BUFFER_EVENTID: 2599 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: 2600 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: 2601 case WMI_10_2_WDS_PEER_EVENTID: 2602 ath10k_dbg(ATH10K_DBG_WMI, 2603 "received event id %d not implemented\n", id); 2604 break; 2605 default: 2606 ath10k_warn("Unknown eventid: %d\n", id); 2607 break; 2608 } 2609 2610 dev_kfree_skb(skb); 2611 } 2612 2613 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 2614 { 2615 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 2616 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 2617 ath10k_wmi_10_2_process_rx(ar, skb); 2618 else 2619 ath10k_wmi_10x_process_rx(ar, skb); 2620 } else { 2621 ath10k_wmi_main_process_rx(ar, skb); 2622 } 2623 } 2624 2625 /* WMI Initialization functions */ 2626 int ath10k_wmi_attach(struct ath10k *ar) 2627 { 2628 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 2629 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 2630 ar->wmi.cmd = &wmi_10_2_cmd_map; 2631 else 2632 ar->wmi.cmd = &wmi_10x_cmd_map; 2633 2634 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 2635 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 2636 } else { 2637 ar->wmi.cmd = &wmi_cmd_map; 2638 ar->wmi.vdev_param = &wmi_vdev_param_map; 2639 ar->wmi.pdev_param = &wmi_pdev_param_map; 2640 } 2641 2642 init_completion(&ar->wmi.service_ready); 2643 init_completion(&ar->wmi.unified_ready); 2644 init_waitqueue_head(&ar->wmi.tx_credits_wq); 2645 2646 return 0; 2647 } 2648 2649 void ath10k_wmi_detach(struct ath10k *ar) 2650 { 2651 int i; 2652 2653 /* free the host memory chunks requested by firmware */ 2654 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 2655 dma_free_coherent(ar->dev, 2656 ar->wmi.mem_chunks[i].len, 2657 ar->wmi.mem_chunks[i].vaddr, 2658 ar->wmi.mem_chunks[i].paddr); 2659 } 2660 2661 ar->wmi.num_mem_chunks = 0; 2662 } 2663 2664 int ath10k_wmi_connect(struct ath10k *ar) 2665 { 2666 int status; 2667 struct ath10k_htc_svc_conn_req conn_req; 2668 struct ath10k_htc_svc_conn_resp conn_resp; 2669 2670 memset(&conn_req, 0, sizeof(conn_req)); 2671 memset(&conn_resp, 0, sizeof(conn_resp)); 2672 2673 /* these fields are the same for all service endpoints */ 2674 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; 2675 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; 2676 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits; 2677 2678 /* connect to control service */ 2679 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; 2680 2681 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); 2682 if (status) { 2683 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", 2684 status); 2685 return status; 2686 } 2687 2688 ar->wmi.eid = conn_resp.eid; 2689 return 0; 2690 } 2691 2692 static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, 2693 u16 rd2g, u16 rd5g, u16 ctl2g, 2694 u16 ctl5g) 2695 { 2696 struct wmi_pdev_set_regdomain_cmd *cmd; 2697 struct sk_buff *skb; 2698 2699 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2700 if (!skb) 2701 return -ENOMEM; 2702 2703 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 2704 cmd->reg_domain = __cpu_to_le32(rd); 2705 cmd->reg_domain_2G = __cpu_to_le32(rd2g); 2706 cmd->reg_domain_5G = __cpu_to_le32(rd5g); 2707 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 2708 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2709 2710 ath10k_dbg(ATH10K_DBG_WMI, 2711 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 2712 rd, rd2g, rd5g, ctl2g, ctl5g); 2713 2714 
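	/* The exported wrapper ath10k_wmi_pdev_set_regdomain() below selects
	 * this variant for main firmware; 10.x firmware gets the extended
	 * command that also carries the DFS region.  Illustrative caller
	 * sketch only (not taken from this file):
	 *
	 *	ret = ath10k_wmi_pdev_set_regdomain(ar, rd, rd2g, rd5g,
	 *					    ctl2g, ctl5g, dfs_reg);
	 *	if (ret)
	 *		ath10k_warn("failed to set regdomain: %d\n", ret);
	 */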
return ath10k_wmi_cmd_send(ar, skb, 2715 ar->wmi.cmd->pdev_set_regdomain_cmdid); 2716 } 2717 2718 static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, 2719 u16 rd2g, u16 rd5g, 2720 u16 ctl2g, u16 ctl5g, 2721 enum wmi_dfs_region dfs_reg) 2722 { 2723 struct wmi_pdev_set_regdomain_cmd_10x *cmd; 2724 struct sk_buff *skb; 2725 2726 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2727 if (!skb) 2728 return -ENOMEM; 2729 2730 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data; 2731 cmd->reg_domain = __cpu_to_le32(rd); 2732 cmd->reg_domain_2G = __cpu_to_le32(rd2g); 2733 cmd->reg_domain_5G = __cpu_to_le32(rd5g); 2734 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 2735 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2736 cmd->dfs_domain = __cpu_to_le32(dfs_reg); 2737 2738 ath10k_dbg(ATH10K_DBG_WMI, 2739 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", 2740 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); 2741 2742 return ath10k_wmi_cmd_send(ar, skb, 2743 ar->wmi.cmd->pdev_set_regdomain_cmdid); 2744 } 2745 2746 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 2747 u16 rd5g, u16 ctl2g, u16 ctl5g, 2748 enum wmi_dfs_region dfs_reg) 2749 { 2750 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 2751 return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g, 2752 ctl2g, ctl5g, dfs_reg); 2753 else 2754 return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g, 2755 ctl2g, ctl5g); 2756 } 2757 2758 int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 2759 const struct wmi_channel_arg *arg) 2760 { 2761 struct wmi_set_channel_cmd *cmd; 2762 struct sk_buff *skb; 2763 u32 ch_flags = 0; 2764 2765 if (arg->passive) 2766 return -EINVAL; 2767 2768 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2769 if (!skb) 2770 return -ENOMEM; 2771 2772 if (arg->chan_radar) 2773 ch_flags |= WMI_CHAN_FLAG_DFS; 2774 2775 cmd = (struct wmi_set_channel_cmd *)skb->data; 2776 cmd->chan.mhz = __cpu_to_le32(arg->freq); 2777 cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); 2778 cmd->chan.mode = arg->mode; 2779 cmd->chan.flags |= __cpu_to_le32(ch_flags); 2780 cmd->chan.min_power = arg->min_power; 2781 cmd->chan.max_power = arg->max_power; 2782 cmd->chan.reg_power = arg->max_reg_power; 2783 cmd->chan.reg_classid = arg->reg_class_id; 2784 cmd->chan.antenna_max = arg->max_antenna_gain; 2785 2786 ath10k_dbg(ATH10K_DBG_WMI, 2787 "wmi set channel mode %d freq %d\n", 2788 arg->mode, arg->freq); 2789 2790 return ath10k_wmi_cmd_send(ar, skb, 2791 ar->wmi.cmd->pdev_set_channel_cmdid); 2792 } 2793 2794 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) 2795 { 2796 struct wmi_pdev_suspend_cmd *cmd; 2797 struct sk_buff *skb; 2798 2799 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2800 if (!skb) 2801 return -ENOMEM; 2802 2803 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 2804 cmd->suspend_opt = __cpu_to_le32(suspend_opt); 2805 2806 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); 2807 } 2808 2809 int ath10k_wmi_pdev_resume_target(struct ath10k *ar) 2810 { 2811 struct sk_buff *skb; 2812 2813 skb = ath10k_wmi_alloc_skb(0); 2814 if (skb == NULL) 2815 return -ENOMEM; 2816 2817 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); 2818 } 2819 2820 int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) 2821 { 2822 struct wmi_pdev_set_param_cmd *cmd; 2823 struct sk_buff *skb; 2824 2825 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 2826 ath10k_warn("pdev param %d not supported by firmware\n", id); 2827 return 
-EOPNOTSUPP; 2828 } 2829 2830 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2831 if (!skb) 2832 return -ENOMEM; 2833 2834 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 2835 cmd->param_id = __cpu_to_le32(id); 2836 cmd->param_value = __cpu_to_le32(value); 2837 2838 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 2839 id, value); 2840 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 2841 } 2842 2843 static int ath10k_wmi_main_cmd_init(struct ath10k *ar) 2844 { 2845 struct wmi_init_cmd *cmd; 2846 struct sk_buff *buf; 2847 struct wmi_resource_config config = {}; 2848 u32 len, val; 2849 int i; 2850 2851 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); 2852 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); 2853 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); 2854 2855 config.num_offload_reorder_bufs = 2856 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS); 2857 2858 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS); 2859 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS); 2860 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT); 2861 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK); 2862 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK); 2863 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2864 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2865 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2866 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); 2867 config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); 2868 2869 config.scan_max_pending_reqs = 2870 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); 2871 2872 config.bmiss_offload_max_vdev = 2873 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV); 2874 2875 config.roam_offload_max_vdev = 2876 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV); 2877 2878 config.roam_offload_max_ap_profiles = 2879 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES); 2880 2881 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS); 2882 config.num_mcast_table_elems = 2883 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS); 2884 2885 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE); 2886 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE); 2887 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES); 2888 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE); 2889 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM); 2890 2891 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 2892 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 2893 2894 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG); 2895 2896 config.gtk_offload_max_vdev = 2897 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV); 2898 2899 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); 2900 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); 2901 2902 len = sizeof(*cmd) + 2903 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2904 2905 buf = ath10k_wmi_alloc_skb(len); 2906 if (!buf) 2907 return -ENOMEM; 2908 2909 cmd = (struct wmi_init_cmd *)buf->data; 2910 2911 if (ar->wmi.num_mem_chunks == 0) { 2912 cmd->num_host_mem_chunks = 0; 2913 goto out; 2914 } 2915 2916 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 2917 ar->wmi.num_mem_chunks); 2918 2919 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 2920 2921 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 2922 cmd->host_mem_chunks[i].ptr = 
2923 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 2924 cmd->host_mem_chunks[i].size = 2925 __cpu_to_le32(ar->wmi.mem_chunks[i].len); 2926 cmd->host_mem_chunks[i].req_id = 2927 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 2928 2929 ath10k_dbg(ATH10K_DBG_WMI, 2930 "wmi chunk %d len %d requested, addr 0x%llx\n", 2931 i, 2932 ar->wmi.mem_chunks[i].len, 2933 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 2934 } 2935 out: 2936 memcpy(&cmd->resource_config, &config, sizeof(config)); 2937 2938 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); 2939 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 2940 } 2941 2942 static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) 2943 { 2944 struct wmi_init_cmd_10x *cmd; 2945 struct sk_buff *buf; 2946 struct wmi_resource_config_10x config = {}; 2947 u32 len, val; 2948 int i; 2949 2950 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 2951 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 2952 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 2953 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 2954 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 2955 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 2956 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); 2957 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2958 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2959 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2960 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); 2961 config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); 2962 2963 config.scan_max_pending_reqs = 2964 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); 2965 2966 config.bmiss_offload_max_vdev = 2967 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); 2968 2969 config.roam_offload_max_vdev = 2970 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); 2971 2972 config.roam_offload_max_ap_profiles = 2973 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); 2974 2975 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); 2976 config.num_mcast_table_elems = 2977 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); 2978 2979 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 2980 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 2981 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 2982 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); 2983 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 2984 2985 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 2986 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 2987 2988 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); 2989 2990 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); 2991 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); 2992 2993 len = sizeof(*cmd) + 2994 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2995 2996 buf = ath10k_wmi_alloc_skb(len); 2997 if (!buf) 2998 return -ENOMEM; 2999 3000 cmd = (struct wmi_init_cmd_10x *)buf->data; 3001 3002 if (ar->wmi.num_mem_chunks == 0) { 3003 cmd->num_host_mem_chunks = 0; 3004 goto out; 3005 } 3006 3007 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3008 ar->wmi.num_mem_chunks); 3009 3010 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3011 3012 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 3013 
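		/* Echo back each host memory chunk allocated by
		 * ath10k_wmi_alloc_host_mem() in response to the 10.x
		 * SERVICE_READY mem_reqs: the DMA address, length and the
		 * firmware-assigned req_id are carried in the INIT command. */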
cmd->host_mem_chunks[i].ptr = 3014 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 3015 cmd->host_mem_chunks[i].size = 3016 __cpu_to_le32(ar->wmi.mem_chunks[i].len); 3017 cmd->host_mem_chunks[i].req_id = 3018 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3019 3020 ath10k_dbg(ATH10K_DBG_WMI, 3021 "wmi chunk %d len %d requested, addr 0x%llx\n", 3022 i, 3023 ar->wmi.mem_chunks[i].len, 3024 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 3025 } 3026 out: 3027 memcpy(&cmd->resource_config, &config, sizeof(config)); 3028 3029 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); 3030 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3031 } 3032 3033 static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) 3034 { 3035 struct wmi_init_cmd_10_2 *cmd; 3036 struct sk_buff *buf; 3037 struct wmi_resource_config_10x config = {}; 3038 u32 len, val; 3039 int i; 3040 3041 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 3042 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 3043 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 3044 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 3045 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 3046 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 3047 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); 3048 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 3049 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 3050 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 3051 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); 3052 config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); 3053 3054 config.scan_max_pending_reqs = 3055 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); 3056 3057 config.bmiss_offload_max_vdev = 3058 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); 3059 3060 config.roam_offload_max_vdev = 3061 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); 3062 3063 config.roam_offload_max_ap_profiles = 3064 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); 3065 3066 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); 3067 config.num_mcast_table_elems = 3068 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); 3069 3070 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 3071 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 3072 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 3073 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); 3074 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 3075 3076 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 3077 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 3078 3079 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); 3080 3081 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); 3082 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); 3083 3084 len = sizeof(*cmd) + 3085 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 3086 3087 buf = ath10k_wmi_alloc_skb(len); 3088 if (!buf) 3089 return -ENOMEM; 3090 3091 cmd = (struct wmi_init_cmd_10_2 *)buf->data; 3092 3093 if (ar->wmi.num_mem_chunks == 0) { 3094 cmd->num_host_mem_chunks = 0; 3095 goto out; 3096 } 3097 3098 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3099 ar->wmi.num_mem_chunks); 3100 3101 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3102 3103 for (i = 0; i < 
ar->wmi.num_mem_chunks; i++) { 3104 cmd->host_mem_chunks[i].ptr = 3105 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 3106 cmd->host_mem_chunks[i].size = 3107 __cpu_to_le32(ar->wmi.mem_chunks[i].len); 3108 cmd->host_mem_chunks[i].req_id = 3109 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3110 3111 ath10k_dbg(ATH10K_DBG_WMI, 3112 "wmi chunk %d len %d requested, addr 0x%llx\n", 3113 i, 3114 ar->wmi.mem_chunks[i].len, 3115 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 3116 } 3117 out: 3118 memcpy(&cmd->resource_config.common, &config, sizeof(config)); 3119 3120 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10.2\n"); 3121 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3122 } 3123 3124 int ath10k_wmi_cmd_init(struct ath10k *ar) 3125 { 3126 int ret; 3127 3128 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 3129 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 3130 ret = ath10k_wmi_10_2_cmd_init(ar); 3131 else 3132 ret = ath10k_wmi_10x_cmd_init(ar); 3133 } else { 3134 ret = ath10k_wmi_main_cmd_init(ar); 3135 } 3136 3137 return ret; 3138 } 3139 3140 static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, 3141 const struct wmi_start_scan_arg *arg) 3142 { 3143 int len; 3144 3145 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 3146 len = sizeof(struct wmi_start_scan_cmd_10x); 3147 else 3148 len = sizeof(struct wmi_start_scan_cmd); 3149 3150 if (arg->ie_len) { 3151 if (!arg->ie) 3152 return -EINVAL; 3153 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) 3154 return -EINVAL; 3155 3156 len += sizeof(struct wmi_ie_data); 3157 len += roundup(arg->ie_len, 4); 3158 } 3159 3160 if (arg->n_channels) { 3161 if (!arg->channels) 3162 return -EINVAL; 3163 if (arg->n_channels > ARRAY_SIZE(arg->channels)) 3164 return -EINVAL; 3165 3166 len += sizeof(struct wmi_chan_list); 3167 len += sizeof(__le32) * arg->n_channels; 3168 } 3169 3170 if (arg->n_ssids) { 3171 if (!arg->ssids) 3172 return -EINVAL; 3173 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) 3174 return -EINVAL; 3175 3176 len += sizeof(struct wmi_ssid_list); 3177 len += sizeof(struct wmi_ssid) * arg->n_ssids; 3178 } 3179 3180 if (arg->n_bssids) { 3181 if (!arg->bssids) 3182 return -EINVAL; 3183 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) 3184 return -EINVAL; 3185 3186 len += sizeof(struct wmi_bssid_list); 3187 len += sizeof(struct wmi_mac_addr) * arg->n_bssids; 3188 } 3189 3190 return len; 3191 } 3192 3193 int ath10k_wmi_start_scan(struct ath10k *ar, 3194 const struct wmi_start_scan_arg *arg) 3195 { 3196 struct wmi_start_scan_cmd *cmd; 3197 struct sk_buff *skb; 3198 struct wmi_ie_data *ie; 3199 struct wmi_chan_list *channels; 3200 struct wmi_ssid_list *ssids; 3201 struct wmi_bssid_list *bssids; 3202 u32 scan_id; 3203 u32 scan_req_id; 3204 int off; 3205 int len = 0; 3206 int i; 3207 3208 len = ath10k_wmi_start_scan_calc_len(ar, arg); 3209 if (len < 0) 3210 return len; /* len contains error code here */ 3211 3212 skb = ath10k_wmi_alloc_skb(len); 3213 if (!skb) 3214 return -ENOMEM; 3215 3216 scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; 3217 scan_id |= arg->scan_id; 3218 3219 scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 3220 scan_req_id |= arg->scan_req_id; 3221 3222 cmd = (struct wmi_start_scan_cmd *)skb->data; 3223 cmd->scan_id = __cpu_to_le32(scan_id); 3224 cmd->scan_req_id = __cpu_to_le32(scan_req_id); 3225 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3226 cmd->scan_priority = __cpu_to_le32(arg->scan_priority); 3227 cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); 3228 cmd->dwell_time_active = 
__cpu_to_le32(arg->dwell_time_active); 3229 cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); 3230 cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); 3231 cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); 3232 cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); 3233 cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); 3234 cmd->idle_time = __cpu_to_le32(arg->idle_time); 3235 cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); 3236 cmd->probe_delay = __cpu_to_le32(arg->probe_delay); 3237 cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); 3238 3239 /* TLV list starts after fields included in the struct */ 3240 /* There's just one filed that differes the two start_scan 3241 * structures - burst_duration, which we are not using btw, 3242 no point to make the split here, just shift the buffer to fit with 3243 given FW */ 3244 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 3245 off = sizeof(struct wmi_start_scan_cmd_10x); 3246 else 3247 off = sizeof(struct wmi_start_scan_cmd); 3248 3249 if (arg->n_channels) { 3250 channels = (void *)skb->data + off; 3251 channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); 3252 channels->num_chan = __cpu_to_le32(arg->n_channels); 3253 3254 for (i = 0; i < arg->n_channels; i++) 3255 channels->channel_list[i].freq = 3256 __cpu_to_le16(arg->channels[i]); 3257 3258 off += sizeof(*channels); 3259 off += sizeof(__le32) * arg->n_channels; 3260 } 3261 3262 if (arg->n_ssids) { 3263 ssids = (void *)skb->data + off; 3264 ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); 3265 ssids->num_ssids = __cpu_to_le32(arg->n_ssids); 3266 3267 for (i = 0; i < arg->n_ssids; i++) { 3268 ssids->ssids[i].ssid_len = 3269 __cpu_to_le32(arg->ssids[i].len); 3270 memcpy(&ssids->ssids[i].ssid, 3271 arg->ssids[i].ssid, 3272 arg->ssids[i].len); 3273 } 3274 3275 off += sizeof(*ssids); 3276 off += sizeof(struct wmi_ssid) * arg->n_ssids; 3277 } 3278 3279 if (arg->n_bssids) { 3280 bssids = (void *)skb->data + off; 3281 bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); 3282 bssids->num_bssid = __cpu_to_le32(arg->n_bssids); 3283 3284 for (i = 0; i < arg->n_bssids; i++) 3285 memcpy(&bssids->bssid_list[i], 3286 arg->bssids[i].bssid, 3287 ETH_ALEN); 3288 3289 off += sizeof(*bssids); 3290 off += sizeof(struct wmi_mac_addr) * arg->n_bssids; 3291 } 3292 3293 if (arg->ie_len) { 3294 ie = (void *)skb->data + off; 3295 ie->tag = __cpu_to_le32(WMI_IE_TAG); 3296 ie->ie_len = __cpu_to_le32(arg->ie_len); 3297 memcpy(ie->ie_data, arg->ie, arg->ie_len); 3298 3299 off += sizeof(*ie); 3300 off += roundup(arg->ie_len, 4); 3301 } 3302 3303 if (off != skb->len) { 3304 dev_kfree_skb(skb); 3305 return -EINVAL; 3306 } 3307 3308 ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); 3309 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); 3310 } 3311 3312 void ath10k_wmi_start_scan_init(struct ath10k *ar, 3313 struct wmi_start_scan_arg *arg) 3314 { 3315 /* setup commonly used values */ 3316 arg->scan_req_id = 1; 3317 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 3318 arg->dwell_time_active = 50; 3319 arg->dwell_time_passive = 150; 3320 arg->min_rest_time = 50; 3321 arg->max_rest_time = 500; 3322 arg->repeat_probe_time = 0; 3323 arg->probe_spacing_time = 0; 3324 arg->idle_time = 0; 3325 arg->max_scan_time = 20000; 3326 arg->probe_delay = 5; 3327 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED 3328 | WMI_SCAN_EVENT_COMPLETED 3329 | WMI_SCAN_EVENT_BSS_CHANNEL 3330 | WMI_SCAN_EVENT_FOREIGN_CHANNEL 3331 | WMI_SCAN_EVENT_DEQUEUED; 3332 arg->scan_ctrl_flags 
|= WMI_SCAN_ADD_OFDM_RATES; 3333 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; 3334 arg->n_bssids = 1; 3335 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; 3336 } 3337 3338 int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) 3339 { 3340 struct wmi_stop_scan_cmd *cmd; 3341 struct sk_buff *skb; 3342 u32 scan_id; 3343 u32 req_id; 3344 3345 if (arg->req_id > 0xFFF) 3346 return -EINVAL; 3347 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 3348 return -EINVAL; 3349 3350 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3351 if (!skb) 3352 return -ENOMEM; 3353 3354 scan_id = arg->u.scan_id; 3355 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; 3356 3357 req_id = arg->req_id; 3358 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 3359 3360 cmd = (struct wmi_stop_scan_cmd *)skb->data; 3361 cmd->req_type = __cpu_to_le32(arg->req_type); 3362 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); 3363 cmd->scan_id = __cpu_to_le32(scan_id); 3364 cmd->scan_req_id = __cpu_to_le32(req_id); 3365 3366 ath10k_dbg(ATH10K_DBG_WMI, 3367 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 3368 arg->req_id, arg->req_type, arg->u.scan_id); 3369 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 3370 } 3371 3372 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 3373 enum wmi_vdev_type type, 3374 enum wmi_vdev_subtype subtype, 3375 const u8 macaddr[ETH_ALEN]) 3376 { 3377 struct wmi_vdev_create_cmd *cmd; 3378 struct sk_buff *skb; 3379 3380 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3381 if (!skb) 3382 return -ENOMEM; 3383 3384 cmd = (struct wmi_vdev_create_cmd *)skb->data; 3385 cmd->vdev_id = __cpu_to_le32(vdev_id); 3386 cmd->vdev_type = __cpu_to_le32(type); 3387 cmd->vdev_subtype = __cpu_to_le32(subtype); 3388 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); 3389 3390 ath10k_dbg(ATH10K_DBG_WMI, 3391 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 3392 vdev_id, type, subtype, macaddr); 3393 3394 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); 3395 } 3396 3397 int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) 3398 { 3399 struct wmi_vdev_delete_cmd *cmd; 3400 struct sk_buff *skb; 3401 3402 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3403 if (!skb) 3404 return -ENOMEM; 3405 3406 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 3407 cmd->vdev_id = __cpu_to_le32(vdev_id); 3408 3409 ath10k_dbg(ATH10K_DBG_WMI, 3410 "WMI vdev delete id %d\n", vdev_id); 3411 3412 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); 3413 } 3414 3415 static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, 3416 const struct wmi_vdev_start_request_arg *arg, 3417 u32 cmd_id) 3418 { 3419 struct wmi_vdev_start_request_cmd *cmd; 3420 struct sk_buff *skb; 3421 const char *cmdname; 3422 u32 flags = 0; 3423 u32 ch_flags = 0; 3424 3425 if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && 3426 cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) 3427 return -EINVAL; 3428 if (WARN_ON(arg->ssid && arg->ssid_len == 0)) 3429 return -EINVAL; 3430 if (WARN_ON(arg->hidden_ssid && !arg->ssid)) 3431 return -EINVAL; 3432 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 3433 return -EINVAL; 3434 3435 if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid) 3436 cmdname = "start"; 3437 else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid) 3438 cmdname = "restart"; 3439 else 3440 return -EINVAL; /* should not happen, we already check cmd_id */ 3441 3442 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3443 if (!skb) 3444 return -ENOMEM; 3445 3446 if 
(arg->hidden_ssid) 3447 flags |= WMI_VDEV_START_HIDDEN_SSID; 3448 if (arg->pmf_enabled) 3449 flags |= WMI_VDEV_START_PMF_ENABLED; 3450 if (arg->channel.chan_radar) 3451 ch_flags |= WMI_CHAN_FLAG_DFS; 3452 3453 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 3454 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3455 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); 3456 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval); 3457 cmd->dtim_period = __cpu_to_le32(arg->dtim_period); 3458 cmd->flags = __cpu_to_le32(flags); 3459 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); 3460 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); 3461 3462 if (arg->ssid) { 3463 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); 3464 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 3465 } 3466 3467 cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); 3468 3469 cmd->chan.band_center_freq1 = 3470 __cpu_to_le32(arg->channel.band_center_freq1); 3471 3472 cmd->chan.mode = arg->channel.mode; 3473 cmd->chan.flags |= __cpu_to_le32(ch_flags); 3474 cmd->chan.min_power = arg->channel.min_power; 3475 cmd->chan.max_power = arg->channel.max_power; 3476 cmd->chan.reg_power = arg->channel.max_reg_power; 3477 cmd->chan.reg_classid = arg->channel.reg_class_id; 3478 cmd->chan.antenna_max = arg->channel.max_antenna_gain; 3479 3480 ath10k_dbg(ATH10K_DBG_WMI, 3481 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " 3482 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, 3483 flags, arg->channel.freq, arg->channel.mode, 3484 cmd->chan.flags, arg->channel.max_power); 3485 3486 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 3487 } 3488 3489 int ath10k_wmi_vdev_start(struct ath10k *ar, 3490 const struct wmi_vdev_start_request_arg *arg) 3491 { 3492 u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid; 3493 3494 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); 3495 } 3496 3497 int ath10k_wmi_vdev_restart(struct ath10k *ar, 3498 const struct wmi_vdev_start_request_arg *arg) 3499 { 3500 u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid; 3501 3502 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); 3503 } 3504 3505 int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) 3506 { 3507 struct wmi_vdev_stop_cmd *cmd; 3508 struct sk_buff *skb; 3509 3510 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3511 if (!skb) 3512 return -ENOMEM; 3513 3514 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 3515 cmd->vdev_id = __cpu_to_le32(vdev_id); 3516 3517 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 3518 3519 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); 3520 } 3521 3522 int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 3523 { 3524 struct wmi_vdev_up_cmd *cmd; 3525 struct sk_buff *skb; 3526 3527 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3528 if (!skb) 3529 return -ENOMEM; 3530 3531 cmd = (struct wmi_vdev_up_cmd *)skb->data; 3532 cmd->vdev_id = __cpu_to_le32(vdev_id); 3533 cmd->vdev_assoc_id = __cpu_to_le32(aid); 3534 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); 3535 3536 ath10k_dbg(ATH10K_DBG_WMI, 3537 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 3538 vdev_id, aid, bssid); 3539 3540 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); 3541 } 3542 3543 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) 3544 { 3545 struct wmi_vdev_down_cmd *cmd; 3546 struct sk_buff *skb; 3547 3548 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3549 if (!skb) 3550 return -ENOMEM; 3551 3552 cmd = (struct wmi_vdev_down_cmd *)skb->data; 3553 cmd->vdev_id = 
__cpu_to_le32(vdev_id); 3554 3555 ath10k_dbg(ATH10K_DBG_WMI, 3556 "wmi mgmt vdev down id 0x%x\n", vdev_id); 3557 3558 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); 3559 } 3560 3561 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 3562 u32 param_id, u32 param_value) 3563 { 3564 struct wmi_vdev_set_param_cmd *cmd; 3565 struct sk_buff *skb; 3566 3567 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { 3568 ath10k_dbg(ATH10K_DBG_WMI, 3569 "vdev param %d not supported by firmware\n", 3570 param_id); 3571 return -EOPNOTSUPP; 3572 } 3573 3574 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3575 if (!skb) 3576 return -ENOMEM; 3577 3578 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 3579 cmd->vdev_id = __cpu_to_le32(vdev_id); 3580 cmd->param_id = __cpu_to_le32(param_id); 3581 cmd->param_value = __cpu_to_le32(param_value); 3582 3583 ath10k_dbg(ATH10K_DBG_WMI, 3584 "wmi vdev id 0x%x set param %d value %d\n", 3585 vdev_id, param_id, param_value); 3586 3587 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); 3588 } 3589 3590 int ath10k_wmi_vdev_install_key(struct ath10k *ar, 3591 const struct wmi_vdev_install_key_arg *arg) 3592 { 3593 struct wmi_vdev_install_key_cmd *cmd; 3594 struct sk_buff *skb; 3595 3596 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) 3597 return -EINVAL; 3598 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 3599 return -EINVAL; 3600 3601 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); 3602 if (!skb) 3603 return -ENOMEM; 3604 3605 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 3606 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3607 cmd->key_idx = __cpu_to_le32(arg->key_idx); 3608 cmd->key_flags = __cpu_to_le32(arg->key_flags); 3609 cmd->key_cipher = __cpu_to_le32(arg->key_cipher); 3610 cmd->key_len = __cpu_to_le32(arg->key_len); 3611 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); 3612 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); 3613 3614 if (arg->macaddr) 3615 memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); 3616 if (arg->key_data) 3617 memcpy(cmd->key_data, arg->key_data, arg->key_len); 3618 3619 ath10k_dbg(ATH10K_DBG_WMI, 3620 "wmi vdev install key idx %d cipher %d len %d\n", 3621 arg->key_idx, arg->key_cipher, arg->key_len); 3622 return ath10k_wmi_cmd_send(ar, skb, 3623 ar->wmi.cmd->vdev_install_key_cmdid); 3624 } 3625 3626 int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, 3627 const struct wmi_vdev_spectral_conf_arg *arg) 3628 { 3629 struct wmi_vdev_spectral_conf_cmd *cmd; 3630 struct sk_buff *skb; 3631 u32 cmdid; 3632 3633 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3634 if (!skb) 3635 return -ENOMEM; 3636 3637 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; 3638 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3639 cmd->scan_count = __cpu_to_le32(arg->scan_count); 3640 cmd->scan_period = __cpu_to_le32(arg->scan_period); 3641 cmd->scan_priority = __cpu_to_le32(arg->scan_priority); 3642 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); 3643 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); 3644 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); 3645 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); 3646 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); 3647 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); 3648 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); 3649 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); 3650 cmd->scan_rssi_rpt_mode = 
__cpu_to_le32(arg->scan_rssi_rpt_mode); 3651 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr); 3652 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); 3653 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); 3654 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); 3655 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); 3656 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); 3657 3658 cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; 3659 return ath10k_wmi_cmd_send(ar, skb, cmdid); 3660 } 3661 3662 int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 3663 u32 enable) 3664 { 3665 struct wmi_vdev_spectral_enable_cmd *cmd; 3666 struct sk_buff *skb; 3667 u32 cmdid; 3668 3669 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3670 if (!skb) 3671 return -ENOMEM; 3672 3673 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; 3674 cmd->vdev_id = __cpu_to_le32(vdev_id); 3675 cmd->trigger_cmd = __cpu_to_le32(trigger); 3676 cmd->enable_cmd = __cpu_to_le32(enable); 3677 3678 cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; 3679 return ath10k_wmi_cmd_send(ar, skb, cmdid); 3680 } 3681 3682 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 3683 const u8 peer_addr[ETH_ALEN]) 3684 { 3685 struct wmi_peer_create_cmd *cmd; 3686 struct sk_buff *skb; 3687 3688 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3689 if (!skb) 3690 return -ENOMEM; 3691 3692 cmd = (struct wmi_peer_create_cmd *)skb->data; 3693 cmd->vdev_id = __cpu_to_le32(vdev_id); 3694 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3695 3696 ath10k_dbg(ATH10K_DBG_WMI, 3697 "wmi peer create vdev_id %d peer_addr %pM\n", 3698 vdev_id, peer_addr); 3699 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 3700 } 3701 3702 int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 3703 const u8 peer_addr[ETH_ALEN]) 3704 { 3705 struct wmi_peer_delete_cmd *cmd; 3706 struct sk_buff *skb; 3707 3708 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3709 if (!skb) 3710 return -ENOMEM; 3711 3712 cmd = (struct wmi_peer_delete_cmd *)skb->data; 3713 cmd->vdev_id = __cpu_to_le32(vdev_id); 3714 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3715 3716 ath10k_dbg(ATH10K_DBG_WMI, 3717 "wmi peer delete vdev_id %d peer_addr %pM\n", 3718 vdev_id, peer_addr); 3719 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 3720 } 3721 3722 int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 3723 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 3724 { 3725 struct wmi_peer_flush_tids_cmd *cmd; 3726 struct sk_buff *skb; 3727 3728 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3729 if (!skb) 3730 return -ENOMEM; 3731 3732 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 3733 cmd->vdev_id = __cpu_to_le32(vdev_id); 3734 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 3735 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3736 3737 ath10k_dbg(ATH10K_DBG_WMI, 3738 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 3739 vdev_id, peer_addr, tid_bitmap); 3740 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 3741 } 3742 3743 int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, 3744 const u8 *peer_addr, enum wmi_peer_param param_id, 3745 u32 param_value) 3746 { 3747 struct wmi_peer_set_param_cmd *cmd; 3748 struct sk_buff *skb; 3749 3750 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3751 if (!skb) 3752 return -ENOMEM; 3753 3754 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 3755 cmd->vdev_id = __cpu_to_le32(vdev_id); 3756 cmd->param_id = 
__cpu_to_le32(param_id); 3757 cmd->param_value = __cpu_to_le32(param_value); 3758 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3759 3760 ath10k_dbg(ATH10K_DBG_WMI, 3761 "wmi vdev %d peer 0x%pM set param %d value %d\n", 3762 vdev_id, peer_addr, param_id, param_value); 3763 3764 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); 3765 } 3766 3767 int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 3768 enum wmi_sta_ps_mode psmode) 3769 { 3770 struct wmi_sta_powersave_mode_cmd *cmd; 3771 struct sk_buff *skb; 3772 3773 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3774 if (!skb) 3775 return -ENOMEM; 3776 3777 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; 3778 cmd->vdev_id = __cpu_to_le32(vdev_id); 3779 cmd->sta_ps_mode = __cpu_to_le32(psmode); 3780 3781 ath10k_dbg(ATH10K_DBG_WMI, 3782 "wmi set powersave id 0x%x mode %d\n", 3783 vdev_id, psmode); 3784 3785 return ath10k_wmi_cmd_send(ar, skb, 3786 ar->wmi.cmd->sta_powersave_mode_cmdid); 3787 } 3788 3789 int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 3790 enum wmi_sta_powersave_param param_id, 3791 u32 value) 3792 { 3793 struct wmi_sta_powersave_param_cmd *cmd; 3794 struct sk_buff *skb; 3795 3796 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3797 if (!skb) 3798 return -ENOMEM; 3799 3800 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 3801 cmd->vdev_id = __cpu_to_le32(vdev_id); 3802 cmd->param_id = __cpu_to_le32(param_id); 3803 cmd->param_value = __cpu_to_le32(value); 3804 3805 ath10k_dbg(ATH10K_DBG_WMI, 3806 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 3807 vdev_id, param_id, value); 3808 return ath10k_wmi_cmd_send(ar, skb, 3809 ar->wmi.cmd->sta_powersave_param_cmdid); 3810 } 3811 3812 int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, 3813 enum wmi_ap_ps_peer_param param_id, u32 value) 3814 { 3815 struct wmi_ap_ps_peer_cmd *cmd; 3816 struct sk_buff *skb; 3817 3818 if (!mac) 3819 return -EINVAL; 3820 3821 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3822 if (!skb) 3823 return -ENOMEM; 3824 3825 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; 3826 cmd->vdev_id = __cpu_to_le32(vdev_id); 3827 cmd->param_id = __cpu_to_le32(param_id); 3828 cmd->param_value = __cpu_to_le32(value); 3829 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); 3830 3831 ath10k_dbg(ATH10K_DBG_WMI, 3832 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", 3833 vdev_id, param_id, value, mac); 3834 3835 return ath10k_wmi_cmd_send(ar, skb, 3836 ar->wmi.cmd->ap_ps_peer_param_cmdid); 3837 } 3838 3839 int ath10k_wmi_scan_chan_list(struct ath10k *ar, 3840 const struct wmi_scan_chan_list_arg *arg) 3841 { 3842 struct wmi_scan_chan_list_cmd *cmd; 3843 struct sk_buff *skb; 3844 struct wmi_channel_arg *ch; 3845 struct wmi_channel *ci; 3846 int len; 3847 int i; 3848 3849 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); 3850 3851 skb = ath10k_wmi_alloc_skb(len); 3852 if (!skb) 3853 return -ENOMEM; 3854 3855 cmd = (struct wmi_scan_chan_list_cmd *)skb->data; 3856 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); 3857 3858 for (i = 0; i < arg->n_channels; i++) { 3859 u32 flags = 0; 3860 3861 ch = &arg->channels[i]; 3862 ci = &cmd->chan_info[i]; 3863 3864 if (ch->passive) 3865 flags |= WMI_CHAN_FLAG_PASSIVE; 3866 if (ch->allow_ibss) 3867 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; 3868 if (ch->allow_ht) 3869 flags |= WMI_CHAN_FLAG_ALLOW_HT; 3870 if (ch->allow_vht) 3871 flags |= WMI_CHAN_FLAG_ALLOW_VHT; 3872 if (ch->ht40plus) 3873 flags |= WMI_CHAN_FLAG_HT40_PLUS; 3874 if (ch->chan_radar)
3875 flags |= WMI_CHAN_FLAG_DFS; 3876 3877 ci->mhz = __cpu_to_le32(ch->freq); 3878 ci->band_center_freq1 = __cpu_to_le32(ch->freq); 3879 ci->band_center_freq2 = 0; 3880 ci->min_power = ch->min_power; 3881 ci->max_power = ch->max_power; 3882 ci->reg_power = ch->max_reg_power; 3883 ci->antenna_max = ch->max_antenna_gain; 3884 3885 /* mode & flags share storage */ 3886 ci->mode = ch->mode; 3887 ci->flags |= __cpu_to_le32(flags); 3888 } 3889 3890 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); 3891 } 3892 3893 static void 3894 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf, 3895 const struct wmi_peer_assoc_complete_arg *arg) 3896 { 3897 struct wmi_common_peer_assoc_complete_cmd *cmd = buf; 3898 3899 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3900 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); 3901 cmd->peer_associd = __cpu_to_le32(arg->peer_aid); 3902 cmd->peer_flags = __cpu_to_le32(arg->peer_flags); 3903 cmd->peer_caps = __cpu_to_le32(arg->peer_caps); 3904 cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval); 3905 cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps); 3906 cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); 3907 cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); 3908 cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps); 3909 cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams); 3910 cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); 3911 cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); 3912 3913 memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN); 3914 3915 cmd->peer_legacy_rates.num_rates = 3916 __cpu_to_le32(arg->peer_legacy_rates.num_rates); 3917 memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates, 3918 arg->peer_legacy_rates.num_rates); 3919 3920 cmd->peer_ht_rates.num_rates = 3921 __cpu_to_le32(arg->peer_ht_rates.num_rates); 3922 memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates, 3923 arg->peer_ht_rates.num_rates); 3924 3925 cmd->peer_vht_rates.rx_max_rate = 3926 __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); 3927 cmd->peer_vht_rates.rx_mcs_set = 3928 __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); 3929 cmd->peer_vht_rates.tx_max_rate = 3930 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); 3931 cmd->peer_vht_rates.tx_mcs_set = 3932 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 3933 } 3934 3935 static void 3936 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, 3937 const struct wmi_peer_assoc_complete_arg *arg) 3938 { 3939 struct wmi_main_peer_assoc_complete_cmd *cmd = buf; 3940 3941 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 3942 memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info)); 3943 } 3944 3945 static void 3946 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, 3947 const struct wmi_peer_assoc_complete_arg *arg) 3948 { 3949 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 3950 } 3951 3952 static void 3953 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, 3954 const struct wmi_peer_assoc_complete_arg *arg) 3955 { 3956 struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf; 3957 int max_mcs, max_nss; 3958 u32 info0; 3959 3960 /* TODO: Is using max values okay with firmware? 
*/ 3961 max_mcs = 0xf; 3962 max_nss = 0xf; 3963 3964 info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) | 3965 SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS); 3966 3967 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 3968 cmd->info0 = __cpu_to_le32(info0); 3969 } 3970 3971 int ath10k_wmi_peer_assoc(struct ath10k *ar, 3972 const struct wmi_peer_assoc_complete_arg *arg) 3973 { 3974 struct sk_buff *skb; 3975 int len; 3976 3977 if (arg->peer_mpdu_density > 16) 3978 return -EINVAL; 3979 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) 3980 return -EINVAL; 3981 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) 3982 return -EINVAL; 3983 3984 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 3985 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 3986 len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); 3987 else 3988 len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); 3989 } else { 3990 len = sizeof(struct wmi_main_peer_assoc_complete_cmd); 3991 } 3992 3993 skb = ath10k_wmi_alloc_skb(len); 3994 if (!skb) 3995 return -ENOMEM; 3996 3997 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 3998 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 3999 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); 4000 else 4001 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); 4002 } else { 4003 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); 4004 } 4005 4006 ath10k_dbg(ATH10K_DBG_WMI, 4007 "wmi peer assoc vdev %d addr %pM (%s)\n", 4008 arg->vdev_id, arg->addr, 4009 arg->peer_reassoc ? "reassociate" : "new"); 4010 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 4011 } 4012 4013 /* This function assumes the beacon is already DMA mapped */ 4014 int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) 4015 { 4016 struct wmi_bcn_tx_ref_cmd *cmd; 4017 struct sk_buff *skb; 4018 struct sk_buff *beacon = arvif->beacon; 4019 struct ath10k *ar = arvif->ar; 4020 struct ieee80211_hdr *hdr; 4021 int ret; 4022 u16 fc; 4023 4024 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4025 if (!skb) 4026 return -ENOMEM; 4027 4028 hdr = (struct ieee80211_hdr *)beacon->data; 4029 fc = le16_to_cpu(hdr->frame_control); 4030 4031 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; 4032 cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); 4033 cmd->data_len = __cpu_to_le32(beacon->len); 4034 cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr); 4035 cmd->msdu_id = 0; 4036 cmd->frame_control = __cpu_to_le32(fc); 4037 cmd->flags = 0; 4038 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); 4039 4040 if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) 4041 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 4042 4043 if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab) 4044 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); 4045 4046 ret = ath10k_wmi_cmd_send_nowait(ar, skb, 4047 ar->wmi.cmd->pdev_send_bcn_cmdid); 4048 4049 if (ret) 4050 dev_kfree_skb(skb); 4051 4052 return ret; 4053 } 4054 4055 static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, 4056 const struct wmi_wmm_params_arg *arg) 4057 { 4058 params->cwmin = __cpu_to_le32(arg->cwmin); 4059 params->cwmax = __cpu_to_le32(arg->cwmax); 4060 params->aifs = __cpu_to_le32(arg->aifs); 4061 params->txop = __cpu_to_le32(arg->txop); 4062 params->acm = __cpu_to_le32(arg->acm); 4063 params->no_ack = __cpu_to_le32(arg->no_ack); 4064 } 4065 4066 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 4067 const struct wmi_pdev_set_wmm_params_arg *arg) 4068 { 4069 struct wmi_pdev_set_wmm_params *cmd;
4070 struct sk_buff *skb; 4071 4072 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4073 if (!skb) 4074 return -ENOMEM; 4075 4076 cmd = (struct wmi_pdev_set_wmm_params *)skb->data; 4077 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); 4078 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); 4079 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); 4080 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 4081 4082 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 4083 return ath10k_wmi_cmd_send(ar, skb, 4084 ar->wmi.cmd->pdev_set_wmm_params_cmdid); 4085 } 4086 4087 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) 4088 { 4089 struct wmi_request_stats_cmd *cmd; 4090 struct sk_buff *skb; 4091 4092 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4093 if (!skb) 4094 return -ENOMEM; 4095 4096 cmd = (struct wmi_request_stats_cmd *)skb->data; 4097 cmd->stats_id = __cpu_to_le32(stats_id); 4098 4099 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 4100 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); 4101 } 4102 4103 int ath10k_wmi_force_fw_hang(struct ath10k *ar, 4104 enum wmi_force_fw_hang_type type, u32 delay_ms) 4105 { 4106 struct wmi_force_fw_hang_cmd *cmd; 4107 struct sk_buff *skb; 4108 4109 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4110 if (!skb) 4111 return -ENOMEM; 4112 4113 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 4114 cmd->type = __cpu_to_le32(type); 4115 cmd->delay_ms = __cpu_to_le32(delay_ms); 4116 4117 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 4118 type, delay_ms); 4119 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); 4120 } 4121 4122 int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) 4123 { 4124 struct wmi_dbglog_cfg_cmd *cmd; 4125 struct sk_buff *skb; 4126 u32 cfg; 4127 4128 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4129 if (!skb) 4130 return -ENOMEM; 4131 4132 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; 4133 4134 if (module_enable) { 4135 cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE, 4136 ATH10K_DBGLOG_CFG_LOG_LVL); 4137 } else { 4138 /* set back defaults, all modules with WARN level */ 4139 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, 4140 ATH10K_DBGLOG_CFG_LOG_LVL); 4141 module_enable = ~0; 4142 } 4143 4144 cmd->module_enable = __cpu_to_le32(module_enable); 4145 cmd->module_valid = __cpu_to_le32(~0); 4146 cmd->config_enable = __cpu_to_le32(cfg); 4147 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); 4148 4149 ath10k_dbg(ATH10K_DBG_WMI, 4150 "wmi dbglog cfg modules %08x %08x config %08x %08x\n", 4151 __le32_to_cpu(cmd->module_enable), 4152 __le32_to_cpu(cmd->module_valid), 4153 __le32_to_cpu(cmd->config_enable), 4154 __le32_to_cpu(cmd->config_valid)); 4155 4156 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); 4157 } 4158
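/*
 * Illustrative usage sketch (not part of the driver sources): the WMI
 * helpers above are normally driven from the mac80211 glue code (mac.c).
 * The function below shows how a caller might issue and later cancel a
 * scan with the routines defined in this file. It is a hedged example:
 * the scan_id and vdev_id members of struct wmi_start_scan_arg are
 * assumed from wmi.h and do not appear in the excerpt above, and the
 * SSID, channel and id values are made up for illustration.
 */
static int ath10k_wmi_example_scan(struct ath10k *ar, u32 vdev_id, u32 scan_id)
{
	static const u8 example_ssid[] = "example";
	struct wmi_start_scan_arg arg;
	struct wmi_stop_scan_arg stop_arg;
	int ret;

	memset(&arg, 0, sizeof(arg));

	/* fill in the common defaults (dwell/rest times, notify events,
	 * broadcast bssid) provided by ath10k_wmi_start_scan_init() */
	ath10k_wmi_start_scan_init(ar, &arg);

	/* assumed fields: identify the scan and the requesting vdev */
	arg.scan_id = scan_id;
	arg.vdev_id = vdev_id;

	/* probe for one SSID on a single 2.4 GHz channel */
	arg.n_channels = 1;
	arg.channels[0] = 2412;
	arg.n_ssids = 1;
	arg.ssids[0].len = sizeof(example_ssid) - 1;
	arg.ssids[0].ssid = example_ssid;

	/* packs the fixed command plus the chan/ssid/bssid/ie TLVs and
	 * sends WMI_START_SCAN_CMDID (or the 10.x equivalent) */
	ret = ath10k_wmi_start_scan(ar, &arg);
	if (ret)
		return ret;

	/* later: cancel just this scan; the helper ORs in the host
	 * requestor/scan id prefixes before sending the command */
	memset(&stop_arg, 0, sizeof(stop_arg));
	stop_arg.req_id = 1; /* matches scan_req_id set by start_scan_init */
	stop_arg.req_type = WMI_SCAN_STOP_ONE;
	stop_arg.u.scan_id = scan_id;

	return ath10k_wmi_stop_scan(ar, &stop_arg);
}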