// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
		WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
		WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
		WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
		WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
		WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
		WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
		WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
		WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
		WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
		WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
		WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};

static struct wmi_peer_param_map wmi_peer_param_map = {
	.smps_state = WMI_PEER_SMPS_STATE,
	.ampdu = WMI_PEER_AMPDU,
	.authorize = WMI_PEER_AUTHORIZE,
	.chan_width = WMI_PEER_CHAN_WIDTH,
	.nss = WMI_PEER_NSS,
	.use_4addr = WMI_PEER_USE_4ADDR,
	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
	.debug = WMI_PEER_DEBUG,
	.phymode = WMI_PEER_PHYMODE,
	.dummy_var = WMI_PEER_DUMMY_VAR,
};

/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
		WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};

static struct wmi_pdev_param_map wmi_pdev_param_map = {
	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};

static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
		WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};

static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
		WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable =
		WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
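
/* Note on how these tables are consumed (illustrative, not part of the
 * original comments): the per-firmware-branch cmd/vdev/pdev maps are bound
 * to ar->wmi.cmd, ar->wmi.vdev_param and ar->wmi.pdev_param when the WMI
 * interface is attached, so callers look ids up through the abstract field
 * rather than hard-coding a value, e.g.:
 *
 *	u32 param = ar->wmi.pdev_param->dcs;
 *
 *	if (param != WMI_PDEV_PARAM_UNSUPPORTED)
 *		ath10k_wmi_pdev_set_param(ar, param, 1);
 *
 * Ids mapped to WMI_PDEV_PARAM_UNSUPPORTED or WMI_CMD_UNSUPPORTED are
 * rejected before anything is sent to the firmware.
 */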

/* firmware 10.2 specific mappings */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period =
		WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period =
		WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period =
		WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
		WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_10_4_PDEV_PARAM_DCS,
	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
	.smart_antenna_default_antenna =
		WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
	.remove_mcast2ucast_buffer =
		WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
	.peer_sta_ps_statechg_enable =
		WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};

static const u8 wmi_key_cipher_suites[] = {
	[WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
	[WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
	[WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
	[WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
	[WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
	[WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
	[WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
	[WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
	[WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
};

static const u8 wmi_tlv_key_cipher_suites[] = {
	[WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
	[WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
	[WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
	[WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
	[WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
	[WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
	[WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
	[WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
	[WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
};

static const struct wmi_peer_flags_map wmi_peer_flags_map = {
	.auth = WMI_PEER_AUTH,
	.qos = WMI_PEER_QOS,
	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_PEER_APSD,
	.ht = WMI_PEER_HT,
	.bw40 = WMI_PEER_40MHZ,
	.stbc = WMI_PEER_STBC,
	.ldbc = WMI_PEER_LDPC,
	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
	.static_mimops = WMI_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_PEER_SPATIAL_MUX,
	.vht = WMI_PEER_VHT,
	.bw80 = WMI_PEER_80MHZ,
	.vht_2g = WMI_PEER_VHT_2G,
	.pmf = WMI_PEER_PMF,
	.bw160 = WMI_PEER_160MHZ,
};

static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
	.auth = WMI_10X_PEER_AUTH,
	.qos = WMI_10X_PEER_QOS,
	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_10X_PEER_APSD,
	.ht = WMI_10X_PEER_HT,
	.bw40 = WMI_10X_PEER_40MHZ,
	.stbc = WMI_10X_PEER_STBC,
	.ldbc = WMI_10X_PEER_LDPC,
	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
	.vht = WMI_10X_PEER_VHT,
	.bw80 = WMI_10X_PEER_80MHZ,
	.bw160 = WMI_10X_PEER_160MHZ,
};

static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
	.auth = WMI_10_2_PEER_AUTH,
	.qos = WMI_10_2_PEER_QOS,
	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_10_2_PEER_APSD,
	.ht = WMI_10_2_PEER_HT,
	.bw40 = WMI_10_2_PEER_40MHZ,
	.stbc = WMI_10_2_PEER_STBC,
	.ldbc = WMI_10_2_PEER_LDPC,
	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
	.vht = WMI_10_2_PEER_VHT,
	.bw80 = WMI_10_2_PEER_80MHZ,
	.vht_2g = WMI_10_2_PEER_VHT_2G,
	.pmf = WMI_10_2_PEER_PMF,
	.bw160 = WMI_10_2_PEER_160MHZ,
};

void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
				const struct wmi_channel_arg *arg)
{
	u32 flags = 0;
	struct ieee80211_channel *chan = NULL;

	memset(ch, 0, sizeof(*ch));

	if (arg->passive)
		flags |= WMI_CHAN_FLAG_PASSIVE;
	if (arg->allow_ibss)
		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
	if (arg->allow_ht)
		flags |= WMI_CHAN_FLAG_ALLOW_HT;
	if (arg->allow_vht)
		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
	if (arg->ht40plus)
		flags |= WMI_CHAN_FLAG_HT40_PLUS;
	if (arg->chan_radar)
		flags |= WMI_CHAN_FLAG_DFS;

	ch->band_center_freq2 = 0;
	ch->mhz = __cpu_to_le32(arg->freq);
	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80) {
		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
		chan = ieee80211_get_channel(ar->hw->wiphy,
					     arg->band_center_freq2 - 10);
	}

	if (arg->mode == MODE_11AC_VHT160) {
		u32 band_center_freq1;
		u32 band_center_freq2;

		if (arg->freq > arg->band_center_freq1) {
			band_center_freq1 = arg->band_center_freq1 + 40;
			band_center_freq2 = arg->band_center_freq1 - 40;
		} else {
			band_center_freq1 = arg->band_center_freq1 - 40;
			band_center_freq2 = arg->band_center_freq1 + 40;
		}

		ch->band_center_freq1 = __cpu_to_le32(band_center_freq1);
		/* Minus 10 to get a defined 5G channel frequency */
		chan = ieee80211_get_channel(ar->hw->wiphy,
					     band_center_freq2 - 10);
		/* The center frequency of the entire VHT160 */
		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
	}

	if (chan && chan->flags & IEEE80211_CHAN_RADAR)
		flags |= WMI_CHAN_FLAG_DFS_CFREQ2;

	ch->min_power = arg->min_power;
	ch->max_power = arg->max_power;
	ch->reg_power = arg->max_reg_power;
	ch->antenna_max = arg->max_antenna_gain;
	ch->max_tx_power = arg->max_power;

	/* mode & flags share storage */
	ch->mode = arg->mode;
	ch->flags |= __cpu_to_le32(flags);
}

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
	unsigned long time_left, i;

	/* Sometimes the PCI HIF doesn't receive interrupt
	 * for the service ready message even if the buffer
	 * was completed. PCIe sniffer shows that it's
	 * because the corresponding CE ring doesn't fire
	 * it. Workaround here by polling CE rings. Since
	 * the message could arrive at any time, continue
	 * polling until timeout.
	 */
	do {
		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(ar, i, 1);

		/* The 100 ms granularity is a tradeoff considering scheduler
		 * overhead and response latency
		 */
		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
							msecs_to_jiffies(100));
		if (time_left)
			return 0;
	} while (time_before(jiffies, timeout));

	ath10k_warn(ar, "failed to receive service ready completion\n");
	return -ETIMEDOUT;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
						WMI_UNIFIED_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;
	return 0;
}

struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
			       u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}
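
/* Beacons are handed to the firmware by reference (DMA address) rather than
 * being copied into a WMI command, and only when the host actually holds a
 * beacon for the vif.  The per-vif beacon_state tracks whether the template
 * is scheduled, currently being submitted, or already sent, so a later
 * credit refill does not submit the same buffer twice.
 */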
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_skb_cb *cb;
	struct sk_buff *bcn;
	bool dtim_zero;
	bool deliver_cab;
	int ret;

	spin_lock_bh(&ar->data_lock);

	bcn = arvif->beacon;

	if (!bcn)
		goto unlock;

	cb = ATH10K_SKB_CB(bcn);

	switch (arvif->beacon_state) {
	case ATH10K_BEACON_SENDING:
	case ATH10K_BEACON_SENT:
		break;
	case ATH10K_BEACON_SCHEDULED:
		arvif->beacon_state = ATH10K_BEACON_SENDING;
		spin_unlock_bh(&ar->data_lock);

		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
							arvif->vdev_id,
							bcn->data, bcn->len,
							cb->paddr,
							dtim_zero,
							deliver_cab);

		spin_lock_bh(&ar->data_lock);

		if (ret == 0)
			arvif->beacon_state = ATH10K_BEACON_SENT;
		else
			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
	}

unlock:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   ATH10K_ITER_NORMAL_FLAGS,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		if (ar->state == ATH10K_STATE_WEDGED) {
			ret = -ESHUTDOWN;
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "drop wmi command %d, hardware is wedged\n", cmd_id);
		}
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);

		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), 3 * HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	if (ret == -EAGAIN) {
		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
			    cmd_id);
		ath10k_core_start_recovery(ar);
	}

	return ret;
}

static struct sk_buff *
ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct ath10k_vif *arvif;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int len;
	u32 vdev_id;
	u32 buf_len = msdu->len;
	u16 fc;
	const u8 *peer_addr;

	hdr = (struct ieee80211_hdr *)msdu->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		vdev_id = arvif->vdev_id;
	} else {
		vdev_id = 0;
	}

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return ERR_PTR(-EINVAL);

	len = sizeof(cmd->hdr) + msdu->len;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			len += sizeof(struct ieee80211_mmie_16);
			buf_len += sizeof(struct ieee80211_mmie_16);
		} else {
			if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
				len += IEEE80211_GCMP_MIC_LEN;
				buf_len += IEEE80211_GCMP_MIC_LEN;
			} else {
				len += IEEE80211_CCMP_MIC_LEN;
				buf_len += IEEE80211_CCMP_MIC_LEN;
			}
		}
	}

	len = round_up(len, 4);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
	memcpy(cmd->buf, msdu->data, msdu->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);
	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
	trace_ath10k_tx_payload(ar, skb->data, skb->len);

	return skb;
}

static void ath10k_wmi_event_scan_started(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_STARTING:
		ar->scan.state = ATH10K_SCAN_RUNNING;

		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	}
}

static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath10k_scan_finish(ar);
		break;
	}
}

static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		__ath10k_scan_finish(ar);
		break;
	}
}

static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ar->scan_channel = NULL;
		break;
	}
}

static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);

		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);
		break;
	}
}

static const char *
ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
			       enum wmi_scan_completion_reason reason)
{
	switch (type) {
	case WMI_SCAN_EVENT_STARTED:
		return "started";
	case WMI_SCAN_EVENT_COMPLETED:
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			return "completed";
		case WMI_SCAN_REASON_CANCELLED:
			return "completed [cancelled]";
		case WMI_SCAN_REASON_PREEMPTED:
			return "completed [preempted]";
		case WMI_SCAN_REASON_TIMEDOUT:
			return "completed [timedout]";
		case WMI_SCAN_REASON_INTERNAL_FAILURE:
			return "completed [internal err]";
		case WMI_SCAN_REASON_MAX:
			break;
		}
		return "completed [unknown]";
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		return "bss channel";
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		return "foreign channel";
	case WMI_SCAN_EVENT_DEQUEUED:
		return "dequeued";
	case WMI_SCAN_EVENT_PREEMPTED:
		return "preempted";
	case WMI_SCAN_EVENT_START_FAILED:
		return "start failed";
	case WMI_SCAN_EVENT_RESTARTED:
		return "restarted";
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
		return "foreign channel exit";
	default:
		return "unknown";
	}
}

static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
				      struct wmi_scan_ev_arg *arg)
{
	struct wmi_scan_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->event_type = ev->event_type;
	arg->reason = ev->reason;
	arg->channel_freq = ev->channel_freq;
	arg->scan_req_id = ev->scan_req_id;
	arg->scan_id = ev->scan_id;
	arg->vdev_id = ev->vdev_id;

	return 0;
}
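
/* Dispatch a WMI scan event to the scan state machine above.  The event is
 * parsed via the per-version pull op, logged, and the relevant handler is
 * run under ar->data_lock; event types the driver does not act on
 * (dequeued, preempted, restarted, foreign channel exit) are intentionally
 * ignored.
 */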
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_ev_arg arg = {};
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;
	int ret;

	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
		return ret;
	}

	event_type = __le32_to_cpu(arg.event_type);
	reason = __le32_to_cpu(arg.reason);
	freq = __le32_to_cpu(arg.channel_freq);
	req_id = __le32_to_cpu(arg.scan_req_id);
	scan_id = __le32_to_cpu(arg.scan_id);
	vdev_id = __le32_to_cpu(arg.vdev_id);

	spin_lock_bh(&ar->data_lock);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath10k_wmi_event_scan_type_str(event_type, reason),
		   event_type, reason, freq, req_id, scan_id, vdev_id,
		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_wmi_event_scan_foreign_chan(ar, freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_warn(ar, "received scan start failure event\n");
		ath10k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

/* If keys are configured, HW decrypts all frames
 * with protected bit set. Mark such frames as decrypted.
 */
static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
					 struct sk_buff *skb,
					 struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen;
	bool peer_key;
	u8 *addr, keyidx;

	if (!ieee80211_is_auth(hdr->frame_control) ||
	    !ieee80211_has_protected(hdr->frame_control))
		return;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
		return;

	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
	addr = ieee80211_get_SA(hdr);

	spin_lock_bh(&ar->data_lock);
	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
	spin_unlock_bh(&ar->data_lock);

	if (peer_key) {
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac wep key present for peer %pM\n", addr);
		status->flag |= RX_FLAG_DECRYPTED;
	}
}

static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
					 struct wmi_mgmt_rx_ev_arg *arg)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct wmi_mgmt_rx_ext_info *ext_info;
	size_t pull_len;
	u32 msdu_len;
	u32 len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
		     ar->running_fw->fw_file.fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	if (skb->len < pull_len)
		return -EPROTO;

	skb_pull(skb, pull_len);
	arg->channel = ev_hdr->channel;
	arg->buf_len = ev_hdr->buf_len;
	arg->status = ev_hdr->status;
	arg->snr = ev_hdr->snr;
	arg->phy_mode = ev_hdr->phy_mode;
	arg->rate = ev_hdr->rate;

	msdu_len = __le32_to_cpu(arg->buf_len);
	if (skb->len < msdu_len)
		return -EPROTO;

	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
		memcpy(&arg->ext_info, ext_info,
		       sizeof(struct wmi_mgmt_rx_ext_info));
	}
	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
	 * trailer with credit update. Trim the excess garbage.
	 */
	skb_trim(skb, msdu_len);

	return 0;
}

static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
					      struct sk_buff *skb,
					      struct wmi_mgmt_rx_ev_arg *arg)
{
	struct wmi_10_4_mgmt_rx_event *ev;
	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
	size_t pull_len;
	u32 msdu_len;
	struct wmi_mgmt_rx_ext_info *ext_info;
	u32 len;

	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
	ev_hdr = &ev->hdr;
	pull_len = sizeof(*ev);

	if (skb->len < pull_len)
		return -EPROTO;

	skb_pull(skb, pull_len);
	arg->channel = ev_hdr->channel;
	arg->buf_len = ev_hdr->buf_len;
	arg->status = ev_hdr->status;
	arg->snr = ev_hdr->snr;
	arg->phy_mode = ev_hdr->phy_mode;
	arg->rate = ev_hdr->rate;

	msdu_len = __le32_to_cpu(arg->buf_len);
	if (skb->len < msdu_len)
		return -EPROTO;

	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
		memcpy(&arg->ext_info, ext_info,
		       sizeof(struct wmi_mgmt_rx_ext_info));
	}

	/* Make sure bytes added for padding are removed. */
	skb_trim(skb, msdu_len);

	return 0;
}

static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
				       struct ieee80211_hdr *hdr)
{
	if (!ieee80211_has_protected(hdr->frame_control))
		return false;

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set.
	 */
	if (ieee80211_is_auth(hdr->frame_control))
		return false;

	/* qca99x0 based FW delivers broadcast or multicast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
	    ar->hw_params.sw_decrypt_mcast_mgmt)
		return false;

	return true;
}

static int
wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
{
	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
	struct ath10k_wmi *wmi = &ar->wmi;
	struct ieee80211_tx_info *info;
	struct sk_buff *msdu;
	int ret;

	spin_lock_bh(&ar->data_lock);

	pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
	if (!pkt_addr) {
		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
			    param->desc_id);
		ret = -ENOENT;
		goto out;
	}

	msdu = pkt_addr->vaddr;
	dma_unmap_single(ar->dev, pkt_addr->paddr,
			 msdu->len, DMA_TO_DEVICE);
	info = IEEE80211_SKB_CB(msdu);
	kfree(pkt_addr);

	if (param->status) {
		info->flags &= ~IEEE80211_TX_STAT_ACK;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
					  param->ack_rssi;
		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	ret = 0;

out:
	idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
	spin_unlock_bh(&ar->data_lock);
	return ret;
}

int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
	struct mgmt_tx_compl_params param;
	int ret;

	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
		return ret;
	}

	memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
	param.desc_id = __le32_to_cpu(arg.desc_id);
	param.status = __le32_to_cpu(arg.status);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		param.ack_rssi = __le32_to_cpu(arg.ack_rssi);

	wmi_process_mgmt_tx_comp(ar, &param);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");

	return 0;
}

int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
	struct mgmt_tx_compl_params param;
	u32 num_reports;
	int i, ret;

	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
		return ret;
	}

	num_reports = __le32_to_cpu(arg.num_reports);

	for (i = 0; i < num_reports; i++) {
		memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
		param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
		param.status = __le32_to_cpu(arg.status[i]);

		if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
			param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
		wmi_process_mgmt_tx_comp(ar, &param);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");

	return 0;
}
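
/* Management rx path: translate the WMI rx descriptor into an
 * ieee80211_rx_status (band, frequency, signal, per-chain RSSI, rate) and
 * hand the frame to mac80211.  Frames received while CAC is running, or
 * flagged with decrypt/key-cache-miss/CRC errors, are dropped here rather
 * than being reported upwards.
 */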
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_ev_arg arg = {};
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	struct ieee80211_supported_band *sband;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr, rssi;
	u32 rate;
	u16 fc;
	int ret, i;

	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
		dev_kfree_skb(skb);
		return ret;
	}

	channel = __le32_to_cpu(arg.channel);
	rx_status = __le32_to_cpu(arg.status);
	snr = __le32_to_cpu(arg.snr);
	phy_mode = __le32_to_cpu(arg.phy_mode);
	rate = __le32_to_cpu(arg.rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
		status->mactime =
			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
		status->flag |= RX_FLAG_MACTIME_END;
	}
	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx.
	 */
	if (channel >= 1 && channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		return 0;
	}

	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");

	sband = &ar->mac.sbands[status->band];

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;

	BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));

	for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
		status->chains &= ~BIT(i);
		rssi = __le32_to_cpu(arg.rssi[i]);
		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);

		if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
			status->chains |= BIT(i);
		}
	}

	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	ath10k_wmi_handle_wep_reauth(ar, skb, status);

	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_action(hdr->frame_control) &&
		    !ieee80211_is_deauth(hdr->frame_control) &&
		    !ieee80211_is_disassoc(hdr->frame_control)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath10k_mac_handle_beacon(ar, skb);

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ar->hw, skb);

	return 0;
}

static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
					 struct wmi_ch_info_ev_arg *arg)
{
	struct wmi_chan_info_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->err_code = ev->err_code;
	arg->freq = ev->freq;
	arg->cmd_flags = ev->cmd_flags;
	arg->noise_floor = ev->noise_floor;
	arg->rx_clear_count = ev->rx_clear_count;
	arg->cycle_count = ev->cycle_count;

	return 0;
}

static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
					      struct sk_buff *skb,
					      struct wmi_ch_info_ev_arg *arg)
{
	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->err_code = ev->err_code;
	arg->freq = ev->freq;
	arg->cmd_flags = ev->cmd_flags;
	arg->noise_floor = ev->noise_floor;
	arg->rx_clear_count = ev->rx_clear_count;
	arg->cycle_count = ev->cycle_count;
	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	arg->rx_frame_count = ev->rx_frame_count;

	return 0;
}
2723 */ 2724 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar, 2725 struct chan_info_params *params) 2726 { 2727 struct survey_info *survey; 2728 int idx; 2729 2730 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { 2731 ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n"); 2732 return; 2733 } 2734 2735 idx = freq_to_idx(ar, params->freq); 2736 if (idx >= ARRAY_SIZE(ar->survey)) { 2737 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", 2738 params->freq, idx); 2739 return; 2740 } 2741 2742 survey = &ar->survey[idx]; 2743 2744 if (!params->mac_clk_mhz) 2745 return; 2746 2747 memset(survey, 0, sizeof(*survey)); 2748 2749 survey->noise = params->noise_floor; 2750 survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000; 2751 survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000; 2752 survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 2753 SURVEY_INFO_TIME_BUSY; 2754 } 2755 2756 /* 2757 * Handle the channel info event for firmware which sends chan_info 2758 * event in pairs(start and stop events) for every scanned channel. 2759 */ 2760 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar, 2761 struct chan_info_params *params) 2762 { 2763 struct survey_info *survey; 2764 int idx; 2765 2766 idx = freq_to_idx(ar, params->freq); 2767 if (idx >= ARRAY_SIZE(ar->survey)) { 2768 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", 2769 params->freq, idx); 2770 return; 2771 } 2772 2773 if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { 2774 if (ar->ch_info_can_report_survey) { 2775 survey = &ar->survey[idx]; 2776 survey->noise = params->noise_floor; 2777 survey->filled = SURVEY_INFO_NOISE_DBM; 2778 2779 ath10k_hw_fill_survey_time(ar, 2780 survey, 2781 params->cycle_count, 2782 params->rx_clear_count, 2783 ar->survey_last_cycle_count, 2784 ar->survey_last_rx_clear_count); 2785 } 2786 2787 ar->ch_info_can_report_survey = false; 2788 } else { 2789 ar->ch_info_can_report_survey = true; 2790 } 2791 2792 if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) { 2793 ar->survey_last_rx_clear_count = params->rx_clear_count; 2794 ar->survey_last_cycle_count = params->cycle_count; 2795 } 2796 } 2797 2798 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) 2799 { 2800 struct chan_info_params ch_info_param; 2801 struct wmi_ch_info_ev_arg arg = {}; 2802 int ret; 2803 2804 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg); 2805 if (ret) { 2806 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret); 2807 return; 2808 } 2809 2810 ch_info_param.err_code = __le32_to_cpu(arg.err_code); 2811 ch_info_param.freq = __le32_to_cpu(arg.freq); 2812 ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags); 2813 ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor); 2814 ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count); 2815 ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count); 2816 ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz); 2817 2818 ath10k_dbg(ar, ATH10K_DBG_WMI, 2819 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", 2820 ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags, 2821 ch_info_param.noise_floor, ch_info_param.rx_clear_count, 2822 ch_info_param.cycle_count); 2823 2824 spin_lock_bh(&ar->data_lock); 2825 2826 switch (ar->scan.state) { 2827 case ATH10K_SCAN_IDLE: 2828 case ATH10K_SCAN_STARTING: 2829 ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a 
scan request, ignoring\n"); 2830 goto exit; 2831 case ATH10K_SCAN_RUNNING: 2832 case ATH10K_SCAN_ABORTING: 2833 break; 2834 } 2835 2836 if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL, 2837 ar->running_fw->fw_file.fw_features)) 2838 ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param); 2839 else 2840 ath10k_wmi_event_chan_info_paired(ar, &ch_info_param); 2841 2842 exit: 2843 spin_unlock_bh(&ar->data_lock); 2844 } 2845 2846 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 2847 { 2848 struct wmi_echo_ev_arg arg = {}; 2849 int ret; 2850 2851 ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg); 2852 if (ret) { 2853 ath10k_warn(ar, "failed to parse echo: %d\n", ret); 2854 return; 2855 } 2856 2857 ath10k_dbg(ar, ATH10K_DBG_WMI, 2858 "wmi event echo value 0x%08x\n", 2859 le32_to_cpu(arg.value)); 2860 2861 if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID) 2862 complete(&ar->wmi.barrier); 2863 } 2864 2865 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) 2866 { 2867 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", 2868 skb->len); 2869 2870 trace_ath10k_wmi_dbglog(ar, skb->data, skb->len); 2871 2872 return 0; 2873 } 2874 2875 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, 2876 struct ath10k_fw_stats_pdev *dst) 2877 { 2878 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); 2879 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 2880 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 2881 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); 2882 dst->cycle_count = __le32_to_cpu(src->cycle_count); 2883 dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 2884 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 2885 } 2886 2887 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, 2888 struct ath10k_fw_stats_pdev *dst) 2889 { 2890 dst->comp_queued = __le32_to_cpu(src->comp_queued); 2891 dst->comp_delivered = __le32_to_cpu(src->comp_delivered); 2892 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued); 2893 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued); 2894 dst->wmm_drop = __le32_to_cpu(src->wmm_drop); 2895 dst->local_enqued = __le32_to_cpu(src->local_enqued); 2896 dst->local_freed = __le32_to_cpu(src->local_freed); 2897 dst->hw_queued = __le32_to_cpu(src->hw_queued); 2898 dst->hw_reaped = __le32_to_cpu(src->hw_reaped); 2899 dst->underrun = __le32_to_cpu(src->underrun); 2900 dst->tx_abort = __le32_to_cpu(src->tx_abort); 2901 dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued); 2902 dst->tx_ko = __le32_to_cpu(src->tx_ko); 2903 dst->data_rc = __le32_to_cpu(src->data_rc); 2904 dst->self_triggers = __le32_to_cpu(src->self_triggers); 2905 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 2906 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 2907 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 2908 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 2909 dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 2910 dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 2911 dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 2912 } 2913 2914 static void 2915 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src, 2916 struct ath10k_fw_stats_pdev *dst) 2917 { 2918 dst->comp_queued = __le32_to_cpu(src->comp_queued); 2919 dst->comp_delivered = __le32_to_cpu(src->comp_delivered); 2920 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued); 2921 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued); 2922 
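/* Editorial note (added): every counter in this 10.4 TX stats block arrives
 * as __le32 on the wire; the assignments above and below convert each field
 * to host byte order before it is accumulated into ath10k_fw_stats_pdev,
 * which is presumably what the fw_stats debugfs interface later prints.
 */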
dst->wmm_drop = __le32_to_cpu(src->wmm_drop); 2923 dst->local_enqued = __le32_to_cpu(src->local_enqued); 2924 dst->local_freed = __le32_to_cpu(src->local_freed); 2925 dst->hw_queued = __le32_to_cpu(src->hw_queued); 2926 dst->hw_reaped = __le32_to_cpu(src->hw_reaped); 2927 dst->underrun = __le32_to_cpu(src->underrun); 2928 dst->tx_abort = __le32_to_cpu(src->tx_abort); 2929 dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued); 2930 dst->tx_ko = __le32_to_cpu(src->tx_ko); 2931 dst->data_rc = __le32_to_cpu(src->data_rc); 2932 dst->self_triggers = __le32_to_cpu(src->self_triggers); 2933 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 2934 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 2935 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 2936 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 2937 dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 2938 dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 2939 dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 2940 dst->hw_paused = __le32_to_cpu(src->hw_paused); 2941 dst->seq_posted = __le32_to_cpu(src->seq_posted); 2942 dst->seq_failed_queueing = 2943 __le32_to_cpu(src->seq_failed_queueing); 2944 dst->seq_completed = __le32_to_cpu(src->seq_completed); 2945 dst->seq_restarted = __le32_to_cpu(src->seq_restarted); 2946 dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted); 2947 dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush); 2948 dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter); 2949 dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated); 2950 dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed); 2952 dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired); 2953 } 2954 2955 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, 2956 struct ath10k_fw_stats_pdev *dst) 2957 { 2958 dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change); 2959 dst->status_rcvd = __le32_to_cpu(src->status_rcvd); 2960 dst->r0_frags = __le32_to_cpu(src->r0_frags); 2961 dst->r1_frags = __le32_to_cpu(src->r1_frags); 2962 dst->r2_frags = __le32_to_cpu(src->r2_frags); 2963 dst->r3_frags = __le32_to_cpu(src->r3_frags); 2964 dst->htt_msdus = __le32_to_cpu(src->htt_msdus); 2965 dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus); 2966 dst->loc_msdus = __le32_to_cpu(src->loc_msdus); 2967 dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus); 2968 dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu); 2969 dst->phy_errs = __le32_to_cpu(src->phy_errs); 2970 dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop); 2971 dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs); 2972 } 2973 2974 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src, 2975 struct ath10k_fw_stats_pdev *dst) 2976 { 2977 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); 2978 dst->rts_bad = __le32_to_cpu(src->rts_bad); 2979 dst->rts_good = __le32_to_cpu(src->rts_good); 2980 dst->fcs_bad = __le32_to_cpu(src->fcs_bad); 2981 dst->no_beacons = __le32_to_cpu(src->no_beacons); 2982 dst->mib_int_count = __le32_to_cpu(src->mib_int_count); 2983 } 2984 2985 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, 2986 struct ath10k_fw_stats_peer *dst) 2987 { 2988 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); 2989 dst->peer_rssi = __le32_to_cpu(src->peer_rssi); 2990 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); 2991 } 2992 2993 static void 2994 ath10k_wmi_10_4_pull_peer_stats(const struct
wmi_10_4_peer_stats *src, 2995 struct ath10k_fw_stats_peer *dst) 2996 { 2997 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); 2998 dst->peer_rssi = __le32_to_cpu(src->peer_rssi); 2999 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); 3000 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); 3001 } 3002 3003 static void 3004 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src, 3005 struct ath10k_fw_stats_vdev_extd *dst) 3006 { 3007 dst->vdev_id = __le32_to_cpu(src->vdev_id); 3008 dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt); 3009 dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack); 3010 dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued); 3011 dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt); 3012 dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued); 3013 dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry); 3014 dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry); 3015 dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry); 3016 dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc); 3017 dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry); 3018 dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail); 3019 dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt); 3020 dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt); 3021 dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt); 3022 dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt); 3023 } 3024 3025 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, 3026 struct sk_buff *skb, 3027 struct ath10k_fw_stats *stats) 3028 { 3029 const struct wmi_stats_event *ev = (void *)skb->data; 3030 u32 num_pdev_stats, num_peer_stats; 3031 int i; 3032 3033 if (!skb_pull(skb, sizeof(*ev))) 3034 return -EPROTO; 3035 3036 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 3037 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 3038 3039 for (i = 0; i < num_pdev_stats; i++) { 3040 const struct wmi_pdev_stats *src; 3041 struct ath10k_fw_stats_pdev *dst; 3042 3043 src = (void *)skb->data; 3044 if (!skb_pull(skb, sizeof(*src))) 3045 return -EPROTO; 3046 3047 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3048 if (!dst) 3049 continue; 3050 3051 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 3052 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); 3053 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 3054 3055 list_add_tail(&dst->list, &stats->pdevs); 3056 } 3057 3058 /* fw doesn't implement vdev stats */ 3059 3060 for (i = 0; i < num_peer_stats; i++) { 3061 const struct wmi_peer_stats *src; 3062 struct ath10k_fw_stats_peer *dst; 3063 3064 src = (void *)skb->data; 3065 if (!skb_pull(skb, sizeof(*src))) 3066 return -EPROTO; 3067 3068 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3069 if (!dst) 3070 continue; 3071 3072 ath10k_wmi_pull_peer_stats(src, dst); 3073 list_add_tail(&dst->list, &stats->peers); 3074 } 3075 3076 return 0; 3077 } 3078 3079 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, 3080 struct sk_buff *skb, 3081 struct ath10k_fw_stats *stats) 3082 { 3083 const struct wmi_stats_event *ev = (void *)skb->data; 3084 u32 num_pdev_stats, num_peer_stats; 3085 int i; 3086 3087 if (!skb_pull(skb, sizeof(*ev))) 3088 return -EPROTO; 3089 3090 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 3091 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 3092 3093 for (i = 0; i < num_pdev_stats; i++) { 3094 const struct wmi_10x_pdev_stats *src; 3095 struct ath10k_fw_stats_pdev *dst; 3096 3097 src = (void *)skb->data; 3098 if (!skb_pull(skb, sizeof(*src))) 3099 return -EPROTO; 
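/* Editorial note (added): the kzalloc(..., GFP_ATOMIC) that follows is
 * deliberately non-fatal; when the allocation fails the entry is skipped
 * via continue so the remaining pdev/peer records in this stats event can
 * still be parsed.
 */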
3100 3101 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3102 if (!dst) 3103 continue; 3104 3105 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 3106 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); 3107 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 3108 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); 3109 3110 list_add_tail(&dst->list, &stats->pdevs); 3111 } 3112 3113 /* fw doesn't implement vdev stats */ 3114 3115 for (i = 0; i < num_peer_stats; i++) { 3116 const struct wmi_10x_peer_stats *src; 3117 struct ath10k_fw_stats_peer *dst; 3118 3119 src = (void *)skb->data; 3120 if (!skb_pull(skb, sizeof(*src))) 3121 return -EPROTO; 3122 3123 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3124 if (!dst) 3125 continue; 3126 3127 ath10k_wmi_pull_peer_stats(&src->old, dst); 3128 3129 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); 3130 3131 list_add_tail(&dst->list, &stats->peers); 3132 } 3133 3134 return 0; 3135 } 3136 3137 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, 3138 struct sk_buff *skb, 3139 struct ath10k_fw_stats *stats) 3140 { 3141 const struct wmi_10_2_stats_event *ev = (void *)skb->data; 3142 u32 num_pdev_stats; 3143 u32 num_pdev_ext_stats; 3144 u32 num_peer_stats; 3145 int i; 3146 3147 if (!skb_pull(skb, sizeof(*ev))) 3148 return -EPROTO; 3149 3150 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 3151 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); 3152 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 3153 3154 for (i = 0; i < num_pdev_stats; i++) { 3155 const struct wmi_10_2_pdev_stats *src; 3156 struct ath10k_fw_stats_pdev *dst; 3157 3158 src = (void *)skb->data; 3159 if (!skb_pull(skb, sizeof(*src))) 3160 return -EPROTO; 3161 3162 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3163 if (!dst) 3164 continue; 3165 3166 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 3167 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); 3168 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 3169 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); 3170 /* FIXME: expose 10.2 specific values */ 3171 3172 list_add_tail(&dst->list, &stats->pdevs); 3173 } 3174 3175 for (i = 0; i < num_pdev_ext_stats; i++) { 3176 const struct wmi_10_2_pdev_ext_stats *src; 3177 3178 src = (void *)skb->data; 3179 if (!skb_pull(skb, sizeof(*src))) 3180 return -EPROTO; 3181 3182 /* FIXME: expose values to userspace 3183 * 3184 * Note: Even though this loop seems to do nothing it is 3185 * required to parse following sub-structures properly. 
3186 */ 3187 } 3188 3189 /* fw doesn't implement vdev stats */ 3190 3191 for (i = 0; i < num_peer_stats; i++) { 3192 const struct wmi_10_2_peer_stats *src; 3193 struct ath10k_fw_stats_peer *dst; 3194 3195 src = (void *)skb->data; 3196 if (!skb_pull(skb, sizeof(*src))) 3197 return -EPROTO; 3198 3199 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3200 if (!dst) 3201 continue; 3202 3203 ath10k_wmi_pull_peer_stats(&src->old, dst); 3204 3205 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); 3206 /* FIXME: expose 10.2 specific values */ 3207 3208 list_add_tail(&dst->list, &stats->peers); 3209 } 3210 3211 return 0; 3212 } 3213 3214 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, 3215 struct sk_buff *skb, 3216 struct ath10k_fw_stats *stats) 3217 { 3218 const struct wmi_10_2_stats_event *ev = (void *)skb->data; 3219 u32 num_pdev_stats; 3220 u32 num_pdev_ext_stats; 3221 u32 num_peer_stats; 3222 int i; 3223 3224 if (!skb_pull(skb, sizeof(*ev))) 3225 return -EPROTO; 3226 3227 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 3228 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); 3229 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 3230 3231 for (i = 0; i < num_pdev_stats; i++) { 3232 const struct wmi_10_2_pdev_stats *src; 3233 struct ath10k_fw_stats_pdev *dst; 3234 3235 src = (void *)skb->data; 3236 if (!skb_pull(skb, sizeof(*src))) 3237 return -EPROTO; 3238 3239 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3240 if (!dst) 3241 continue; 3242 3243 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 3244 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); 3245 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 3246 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); 3247 /* FIXME: expose 10.2 specific values */ 3248 3249 list_add_tail(&dst->list, &stats->pdevs); 3250 } 3251 3252 for (i = 0; i < num_pdev_ext_stats; i++) { 3253 const struct wmi_10_2_pdev_ext_stats *src; 3254 3255 src = (void *)skb->data; 3256 if (!skb_pull(skb, sizeof(*src))) 3257 return -EPROTO; 3258 3259 /* FIXME: expose values to userspace 3260 * 3261 * Note: Even though this loop seems to do nothing it is 3262 * required to parse following sub-structures properly. 
3263 */ 3264 } 3265 3266 /* fw doesn't implement vdev stats */ 3267 3268 for (i = 0; i < num_peer_stats; i++) { 3269 const struct wmi_10_2_4_ext_peer_stats *src; 3270 struct ath10k_fw_stats_peer *dst; 3271 int stats_len; 3272 3273 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) 3274 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); 3275 else 3276 stats_len = sizeof(struct wmi_10_2_4_peer_stats); 3277 3278 src = (void *)skb->data; 3279 if (!skb_pull(skb, stats_len)) 3280 return -EPROTO; 3281 3282 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3283 if (!dst) 3284 continue; 3285 3286 ath10k_wmi_pull_peer_stats(&src->common.old, dst); 3287 3288 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); 3289 3290 if (ath10k_peer_stats_enabled(ar)) 3291 dst->rx_duration = __le32_to_cpu(src->rx_duration); 3292 /* FIXME: expose 10.2 specific values */ 3293 3294 list_add_tail(&dst->list, &stats->peers); 3295 } 3296 3297 return 0; 3298 } 3299 3300 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, 3301 struct sk_buff *skb, 3302 struct ath10k_fw_stats *stats) 3303 { 3304 const struct wmi_10_2_stats_event *ev = (void *)skb->data; 3305 u32 num_pdev_stats; 3306 u32 num_pdev_ext_stats; 3307 u32 num_vdev_stats; 3308 u32 num_peer_stats; 3309 u32 num_bcnflt_stats; 3310 u32 stats_id; 3311 int i; 3312 3313 if (!skb_pull(skb, sizeof(*ev))) 3314 return -EPROTO; 3315 3316 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 3317 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); 3318 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); 3319 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 3320 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); 3321 stats_id = __le32_to_cpu(ev->stats_id); 3322 3323 for (i = 0; i < num_pdev_stats; i++) { 3324 const struct wmi_10_4_pdev_stats *src; 3325 struct ath10k_fw_stats_pdev *dst; 3326 3327 src = (void *)skb->data; 3328 if (!skb_pull(skb, sizeof(*src))) 3329 return -EPROTO; 3330 3331 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3332 if (!dst) 3333 continue; 3334 3335 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 3336 ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst); 3337 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 3338 dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs); 3339 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); 3340 3341 list_add_tail(&dst->list, &stats->pdevs); 3342 } 3343 3344 for (i = 0; i < num_pdev_ext_stats; i++) { 3345 const struct wmi_10_2_pdev_ext_stats *src; 3346 3347 src = (void *)skb->data; 3348 if (!skb_pull(skb, sizeof(*src))) 3349 return -EPROTO; 3350 3351 /* FIXME: expose values to userspace 3352 * 3353 * Note: Even though this loop seems to do nothing it is 3354 * required to parse following sub-structures properly. 3355 */ 3356 } 3357 3358 for (i = 0; i < num_vdev_stats; i++) { 3359 const struct wmi_vdev_stats *src; 3360 3361 /* Ignore vdev stats here as it has only vdev id. Actual vdev 3362 * stats will be retrieved from vdev extended stats. 
3363 */ 3364 src = (void *)skb->data; 3365 if (!skb_pull(skb, sizeof(*src))) 3366 return -EPROTO; 3367 } 3368 3369 for (i = 0; i < num_peer_stats; i++) { 3370 const struct wmi_10_4_peer_stats *src; 3371 struct ath10k_fw_stats_peer *dst; 3372 3373 src = (void *)skb->data; 3374 if (!skb_pull(skb, sizeof(*src))) 3375 return -EPROTO; 3376 3377 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3378 if (!dst) 3379 continue; 3380 3381 ath10k_wmi_10_4_pull_peer_stats(src, dst); 3382 list_add_tail(&dst->list, &stats->peers); 3383 } 3384 3385 for (i = 0; i < num_bcnflt_stats; i++) { 3386 const struct wmi_10_4_bss_bcn_filter_stats *src; 3387 3388 src = (void *)skb->data; 3389 if (!skb_pull(skb, sizeof(*src))) 3390 return -EPROTO; 3391 3392 /* FIXME: expose values to userspace 3393 * 3394 * Note: Even though this loop seems to do nothing it is 3395 * required to parse following sub-structures properly. 3396 */ 3397 } 3398 3399 if (stats_id & WMI_10_4_STAT_PEER_EXTD) { 3400 stats->extended = true; 3401 3402 for (i = 0; i < num_peer_stats; i++) { 3403 const struct wmi_10_4_peer_extd_stats *src; 3404 struct ath10k_fw_extd_stats_peer *dst; 3405 3406 src = (void *)skb->data; 3407 if (!skb_pull(skb, sizeof(*src))) 3408 return -EPROTO; 3409 3410 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3411 if (!dst) 3412 continue; 3413 3414 ether_addr_copy(dst->peer_macaddr, 3415 src->peer_macaddr.addr); 3416 dst->rx_duration = __le32_to_cpu(src->rx_duration); 3417 list_add_tail(&dst->list, &stats->peers_extd); 3418 } 3419 } 3420 3421 if (stats_id & WMI_10_4_STAT_VDEV_EXTD) { 3422 for (i = 0; i < num_vdev_stats; i++) { 3423 const struct wmi_vdev_stats_extd *src; 3424 struct ath10k_fw_stats_vdev_extd *dst; 3425 3426 src = (void *)skb->data; 3427 if (!skb_pull(skb, sizeof(*src))) 3428 return -EPROTO; 3429 3430 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 3431 if (!dst) 3432 continue; 3433 ath10k_wmi_10_4_pull_vdev_stats(src, dst); 3434 list_add_tail(&dst->list, &stats->vdevs); 3435 } 3436 } 3437 3438 return 0; 3439 } 3440 3441 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) 3442 { 3443 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); 3444 ath10k_debug_fw_stats_process(ar, skb); 3445 } 3446 3447 static int 3448 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, 3449 struct wmi_vdev_start_ev_arg *arg) 3450 { 3451 struct wmi_vdev_start_response_event *ev = (void *)skb->data; 3452 3453 if (skb->len < sizeof(*ev)) 3454 return -EPROTO; 3455 3456 skb_pull(skb, sizeof(*ev)); 3457 arg->vdev_id = ev->vdev_id; 3458 arg->req_id = ev->req_id; 3459 arg->resp_type = ev->resp_type; 3460 arg->status = ev->status; 3461 3462 return 0; 3463 } 3464 3465 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) 3466 { 3467 struct wmi_vdev_start_ev_arg arg = {}; 3468 int ret; 3469 u32 status; 3470 3471 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); 3472 3473 ar->last_wmi_vdev_start_status = 0; 3474 3475 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); 3476 if (ret) { 3477 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); 3478 ar->last_wmi_vdev_start_status = ret; 3479 goto out; 3480 } 3481 3482 status = __le32_to_cpu(arg.status); 3483 if (WARN_ON_ONCE(status)) { 3484 ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n", 3485 status, (status == WMI_VDEV_START_CHAN_INVALID) ? 3486 "chan-invalid" : "unknown"); 3487 /* Setup is done one way or another though, so we should still 3488 * do the completion, so don't return here. 
3489 */ 3490 ar->last_wmi_vdev_start_status = -EINVAL; 3491 } 3492 3493 out: 3494 complete(&ar->vdev_setup_done); 3495 } 3496 3497 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) 3498 { 3499 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); 3500 complete(&ar->vdev_setup_done); 3501 } 3502 3503 static int 3504 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb, 3505 struct wmi_peer_kick_ev_arg *arg) 3506 { 3507 struct wmi_peer_sta_kickout_event *ev = (void *)skb->data; 3508 3509 if (skb->len < sizeof(*ev)) 3510 return -EPROTO; 3511 3512 skb_pull(skb, sizeof(*ev)); 3513 arg->mac_addr = ev->peer_macaddr.addr; 3514 3515 return 0; 3516 } 3517 3518 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb) 3519 { 3520 struct wmi_peer_kick_ev_arg arg = {}; 3521 struct ieee80211_sta *sta; 3522 int ret; 3523 3524 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg); 3525 if (ret) { 3526 ath10k_warn(ar, "failed to parse peer kickout event: %d\n", 3527 ret); 3528 return; 3529 } 3530 3531 ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n", 3532 arg.mac_addr); 3533 3534 rcu_read_lock(); 3535 3536 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL); 3537 if (!sta) { 3538 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", 3539 arg.mac_addr); 3540 goto exit; 3541 } 3542 3543 ieee80211_report_low_ack(sta, 10); 3544 3545 exit: 3546 rcu_read_unlock(); 3547 } 3548 3549 /* 3550 * FIXME 3551 * 3552 * We don't report to mac80211 sleep state of connected 3553 * stations. Due to this mac80211 can't fill in TIM IE 3554 * correctly. 3555 * 3556 * I know of no way of getting nullfunc frames that contain 3557 * sleep transition from connected stations - these do not 3558 * seem to be sent from the target to the host. There also 3559 * doesn't seem to be a dedicated event for that. So the 3560 * only way left to do this would be to read tim_bitmap 3561 * during SWBA. 3562 * 3563 * We could probably try using tim_bitmap from SWBA to tell 3564 * mac80211 which stations are asleep and which are not. The 3565 * problem here is calling mac80211 functions so many times 3566 * could take too long and make us miss the time to submit 3567 * the beacon to the target. 3568 * 3569 * So as a workaround we try to extend the TIM IE if there 3570 * is unicast buffered for stations with aid > 7 and fill it 3571 * in ourselves. 3572 */ 3573 static void ath10k_wmi_update_tim(struct ath10k *ar, 3574 struct ath10k_vif *arvif, 3575 struct sk_buff *bcn, 3576 const struct wmi_tim_info_arg *tim_info) 3577 { 3578 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; 3579 struct ieee80211_tim_ie *tim; 3580 u8 *ies, *ie; 3581 u8 ie_len, pvm_len; 3582 __le32 t; 3583 u32 v, tim_len; 3584 3585 /* When FW reports 0 in tim_len, ensure at least first byte 3586 * in tim_bitmap is considered for pvm calculation. 3587 */ 3588 tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1; 3589 3590 /* if next SWBA has no tim_changed the tim_bitmap is garbage. 
3591 * we must copy the bitmap upon change and reuse it later 3592 */ 3593 if (__le32_to_cpu(tim_info->tim_changed)) { 3594 int i; 3595 3596 if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) { 3597 ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu", 3598 tim_len, sizeof(arvif->u.ap.tim_bitmap)); 3599 tim_len = sizeof(arvif->u.ap.tim_bitmap); 3600 } 3601 3602 for (i = 0; i < tim_len; i++) { 3603 t = tim_info->tim_bitmap[i / 4]; 3604 v = __le32_to_cpu(t); 3605 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; 3606 } 3607 3608 /* FW reports either length 0 or length based on max supported 3609 * station. so we calculate this on our own 3610 */ 3611 arvif->u.ap.tim_len = 0; 3612 for (i = 0; i < tim_len; i++) 3613 if (arvif->u.ap.tim_bitmap[i]) 3614 arvif->u.ap.tim_len = i; 3615 3616 arvif->u.ap.tim_len++; 3617 } 3618 3619 ies = bcn->data; 3620 ies += ieee80211_hdrlen(hdr->frame_control); 3621 ies += 12; /* fixed parameters */ 3622 3623 ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, 3624 (u8 *)skb_tail_pointer(bcn) - ies); 3625 if (!ie) { 3626 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 3627 ath10k_warn(ar, "no tim ie found;\n"); 3628 return; 3629 } 3630 3631 tim = (void *)ie + 2; 3632 ie_len = ie[1]; 3633 pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ 3634 3635 if (pvm_len < arvif->u.ap.tim_len) { 3636 int expand_size = tim_len - pvm_len; 3637 int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); 3638 void *next_ie = ie + 2 + ie_len; 3639 3640 if (skb_put(bcn, expand_size)) { 3641 memmove(next_ie + expand_size, next_ie, move_size); 3642 3643 ie[1] += expand_size; 3644 ie_len += expand_size; 3645 pvm_len += expand_size; 3646 } else { 3647 ath10k_warn(ar, "tim expansion failed\n"); 3648 } 3649 } 3650 3651 if (pvm_len > tim_len) { 3652 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); 3653 return; 3654 } 3655 3656 tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast); 3657 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); 3658 3659 if (tim->dtim_count == 0) { 3660 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO; 3661 3662 if (__le32_to_cpu(tim_info->tim_mcast) == 1) 3663 ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB; 3664 } 3665 3666 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", 3667 tim->dtim_count, tim->dtim_period, 3668 tim->bitmap_ctrl, pvm_len); 3669 } 3670 3671 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, 3672 struct sk_buff *bcn, 3673 const struct wmi_p2p_noa_info *noa) 3674 { 3675 if (!arvif->vif->p2p) 3676 return; 3677 3678 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); 3679 3680 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) 3681 ath10k_p2p_noa_update(arvif, noa); 3682 3683 if (arvif->u.ap.noa_data) 3684 if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) 3685 skb_put_data(bcn, arvif->u.ap.noa_data, 3686 arvif->u.ap.noa_len); 3687 } 3688 3689 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, 3690 struct wmi_swba_ev_arg *arg) 3691 { 3692 struct wmi_host_swba_event *ev = (void *)skb->data; 3693 u32 map; 3694 size_t i; 3695 3696 if (skb->len < sizeof(*ev)) 3697 return -EPROTO; 3698 3699 skb_pull(skb, sizeof(*ev)); 3700 arg->vdev_map = ev->vdev_map; 3701 3702 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { 3703 if (!(map & BIT(0))) 3704 continue; 3705 3706 /* If this happens there were some changes in firmware and 3707 * ath10k should update the max size of tim_info array. 
3708 */ 3709 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) 3710 break; 3711 3712 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > 3713 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { 3714 ath10k_warn(ar, "refusing to parse invalid swba structure\n"); 3715 return -EPROTO; 3716 } 3717 3718 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; 3719 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; 3720 arg->tim_info[i].tim_bitmap = 3721 ev->bcn_info[i].tim_info.tim_bitmap; 3722 arg->tim_info[i].tim_changed = 3723 ev->bcn_info[i].tim_info.tim_changed; 3724 arg->tim_info[i].tim_num_ps_pending = 3725 ev->bcn_info[i].tim_info.tim_num_ps_pending; 3726 3727 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info; 3728 i++; 3729 } 3730 3731 return 0; 3732 } 3733 3734 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar, 3735 struct sk_buff *skb, 3736 struct wmi_swba_ev_arg *arg) 3737 { 3738 struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data; 3739 u32 map; 3740 size_t i; 3741 3742 if (skb->len < sizeof(*ev)) 3743 return -EPROTO; 3744 3745 skb_pull(skb, sizeof(*ev)); 3746 arg->vdev_map = ev->vdev_map; 3747 3748 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { 3749 if (!(map & BIT(0))) 3750 continue; 3751 3752 /* If this happens there were some changes in firmware and 3753 * ath10k should update the max size of tim_info array. 3754 */ 3755 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) 3756 break; 3757 3758 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > 3759 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { 3760 ath10k_warn(ar, "refusing to parse invalid swba structure\n"); 3761 return -EPROTO; 3762 } 3763 3764 arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; 3765 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; 3766 arg->tim_info[i].tim_bitmap = 3767 ev->bcn_info[i].tim_info.tim_bitmap; 3768 arg->tim_info[i].tim_changed = 3769 ev->bcn_info[i].tim_info.tim_changed; 3770 arg->tim_info[i].tim_num_ps_pending = 3771 ev->bcn_info[i].tim_info.tim_num_ps_pending; 3772 i++; 3773 } 3774 3775 return 0; 3776 } 3777 3778 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, 3779 struct sk_buff *skb, 3780 struct wmi_swba_ev_arg *arg) 3781 { 3782 struct wmi_10_4_host_swba_event *ev = (void *)skb->data; 3783 u32 map, tim_len; 3784 size_t i; 3785 3786 if (skb->len < sizeof(*ev)) 3787 return -EPROTO; 3788 3789 skb_pull(skb, sizeof(*ev)); 3790 arg->vdev_map = ev->vdev_map; 3791 3792 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { 3793 if (!(map & BIT(0))) 3794 continue; 3795 3796 /* If this happens there were some changes in firmware and 3797 * ath10k should update the max size of tim_info array. 
3798 */ 3799 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) 3800 break; 3801 3802 if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > 3803 sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { 3804 ath10k_warn(ar, "refusing to parse invalid swba structure\n"); 3805 return -EPROTO; 3806 } 3807 3808 tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len); 3809 if (tim_len) { 3810 /* Exclude 4 byte guard length */ 3811 tim_len -= 4; 3812 arg->tim_info[i].tim_len = __cpu_to_le32(tim_len); 3813 } else { 3814 arg->tim_info[i].tim_len = 0; 3815 } 3816 3817 arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; 3818 arg->tim_info[i].tim_bitmap = 3819 ev->bcn_info[i].tim_info.tim_bitmap; 3820 arg->tim_info[i].tim_changed = 3821 ev->bcn_info[i].tim_info.tim_changed; 3822 arg->tim_info[i].tim_num_ps_pending = 3823 ev->bcn_info[i].tim_info.tim_num_ps_pending; 3824 3825 /* 10.4 firmware doesn't have p2p support. notice of absence 3826 * info can be ignored for now. 3827 */ 3828 3829 i++; 3830 } 3831 3832 return 0; 3833 } 3834 3835 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar) 3836 { 3837 return WMI_TXBF_CONF_BEFORE_ASSOC; 3838 } 3839 3840 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) 3841 { 3842 struct wmi_swba_ev_arg arg = {}; 3843 u32 map; 3844 int i = -1; 3845 const struct wmi_tim_info_arg *tim_info; 3846 const struct wmi_p2p_noa_info *noa_info; 3847 struct ath10k_vif *arvif; 3848 struct sk_buff *bcn; 3849 dma_addr_t paddr; 3850 int ret, vdev_id = 0; 3851 3852 ret = ath10k_wmi_pull_swba(ar, skb, &arg); 3853 if (ret) { 3854 ath10k_warn(ar, "failed to parse swba event: %d\n", ret); 3855 return; 3856 } 3857 3858 map = __le32_to_cpu(arg.vdev_map); 3859 3860 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", 3861 map); 3862 3863 for (; map; map >>= 1, vdev_id++) { 3864 if (!(map & 0x1)) 3865 continue; 3866 3867 i++; 3868 3869 if (i >= WMI_MAX_AP_VDEV) { 3870 ath10k_warn(ar, "swba has corrupted vdev map\n"); 3871 break; 3872 } 3873 3874 tim_info = &arg.tim_info[i]; 3875 noa_info = arg.noa_info[i]; 3876 3877 ath10k_dbg(ar, ATH10K_DBG_MGMT, 3878 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", 3879 i, 3880 __le32_to_cpu(tim_info->tim_len), 3881 __le32_to_cpu(tim_info->tim_mcast), 3882 __le32_to_cpu(tim_info->tim_changed), 3883 __le32_to_cpu(tim_info->tim_num_ps_pending), 3884 __le32_to_cpu(tim_info->tim_bitmap[3]), 3885 __le32_to_cpu(tim_info->tim_bitmap[2]), 3886 __le32_to_cpu(tim_info->tim_bitmap[1]), 3887 __le32_to_cpu(tim_info->tim_bitmap[0])); 3888 3889 /* TODO: Only first 4 word from tim_bitmap is dumped. 3890 * Extend debug code to dump full tim_bitmap. 
3891 */ 3892 3893 arvif = ath10k_get_arvif(ar, vdev_id); 3894 if (arvif == NULL) { 3895 ath10k_warn(ar, "no vif for vdev_id %d found\n", 3896 vdev_id); 3897 continue; 3898 } 3899 3900 /* mac80211 would have already asked us to stop beaconing and 3901 * bring the vdev down, so continue in that case 3902 */ 3903 if (!arvif->is_up) 3904 continue; 3905 3906 /* There are no completions for beacons so wait for next SWBA 3907 * before telling mac80211 to decrement CSA counter 3908 * 3909 * Once CSA counter is completed stop sending beacons until 3910 * actual channel switch is done 3911 */ 3912 if (arvif->vif->bss_conf.csa_active && 3913 ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) { 3914 ieee80211_csa_finish(arvif->vif, 0); 3915 continue; 3916 } 3917 3918 bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0); 3919 if (!bcn) { 3920 ath10k_warn(ar, "could not get mac80211 beacon\n"); 3921 continue; 3922 } 3923 3924 ath10k_tx_h_seq_no(arvif->vif, bcn); 3925 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info); 3926 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info); 3927 3928 spin_lock_bh(&ar->data_lock); 3929 3930 if (arvif->beacon) { 3931 switch (arvif->beacon_state) { 3932 case ATH10K_BEACON_SENT: 3933 break; 3934 case ATH10K_BEACON_SCHEDULED: 3935 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n", 3936 arvif->vdev_id); 3937 break; 3938 case ATH10K_BEACON_SENDING: 3939 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n", 3940 arvif->vdev_id); 3941 dev_kfree_skb(bcn); 3942 goto skip; 3943 } 3944 3945 ath10k_mac_vif_beacon_free(arvif); 3946 } 3947 3948 if (!arvif->beacon_buf) { 3949 paddr = dma_map_single(arvif->ar->dev, bcn->data, 3950 bcn->len, DMA_TO_DEVICE); 3951 ret = dma_mapping_error(arvif->ar->dev, paddr); 3952 if (ret) { 3953 ath10k_warn(ar, "failed to map beacon: %d\n", 3954 ret); 3955 dev_kfree_skb_any(bcn); 3956 goto skip; 3957 } 3958 3959 ATH10K_SKB_CB(bcn)->paddr = paddr; 3960 } else { 3961 if (bcn->len > IEEE80211_MAX_FRAME_LEN) { 3962 ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n", 3963 bcn->len, IEEE80211_MAX_FRAME_LEN); 3964 skb_trim(bcn, IEEE80211_MAX_FRAME_LEN); 3965 } 3966 memcpy(arvif->beacon_buf, bcn->data, bcn->len); 3967 ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr; 3968 } 3969 3970 arvif->beacon = bcn; 3971 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 3972 3973 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); 3974 trace_ath10k_tx_payload(ar, bcn->data, bcn->len); 3975 3976 skip: 3977 spin_unlock_bh(&ar->data_lock); 3978 } 3979 3980 ath10k_wmi_tx_beacons_nowait(ar); 3981 } 3982 3983 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) 3984 { 3985 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); 3986 } 3987 3988 static void ath10k_radar_detected(struct ath10k *ar) 3989 { 3990 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); 3991 ATH10K_DFS_STAT_INC(ar, radar_detected); 3992 3993 /* Control radar events reporting in debugfs file 3994 * dfs_block_radar_events 3995 */ 3996 if (ar->dfs_block_radar_events) 3997 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); 3998 else 3999 ieee80211_radar_detected(ar->hw, NULL); 4000 } 4001 4002 static void ath10k_radar_confirmation_work(struct work_struct *work) 4003 { 4004 struct ath10k *ar = container_of(work, struct ath10k, 4005 radar_confirmation_work); 4006 struct ath10k_radar_found_info radar_info; 4007 int ret, time_left; 4008 4009 reinit_completion(&ar->wmi.radar_confirm); 4010 4011 spin_lock_bh(&ar->data_lock); 4012 
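/* Editorial note (added): the last reported radar parameters are copied to
 * a local snapshot while holding the data lock, so the WMI radar-found
 * command below can be issued without keeping the spinlock held.
 */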
memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info)); 4013 spin_unlock_bh(&ar->data_lock); 4014 4015 ret = ath10k_wmi_report_radar_found(ar, &radar_info); 4016 if (ret) { 4017 ath10k_warn(ar, "failed to send radar found %d\n", ret); 4018 goto wait_complete; 4019 } 4020 4021 time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm, 4022 ATH10K_WMI_DFS_CONF_TIMEOUT_HZ); 4023 if (time_left) { 4024 /* DFS Confirmation status event received and 4025 * necessary action completed. 4026 */ 4027 goto wait_complete; 4028 } else { 4029 /* DFS Confirmation event not received from FW.Considering this 4030 * as real radar. 4031 */ 4032 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4033 "dfs confirmation not received from fw, considering as radar\n"); 4034 goto radar_detected; 4035 } 4036 4037 radar_detected: 4038 ath10k_radar_detected(ar); 4039 4040 /* Reset state to allow sending confirmation on consecutive radar 4041 * detections, unless radar confirmation is disabled/stopped. 4042 */ 4043 wait_complete: 4044 spin_lock_bh(&ar->data_lock); 4045 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED) 4046 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; 4047 spin_unlock_bh(&ar->data_lock); 4048 } 4049 4050 static void ath10k_dfs_radar_report(struct ath10k *ar, 4051 struct wmi_phyerr_ev_arg *phyerr, 4052 const struct phyerr_radar_report *rr, 4053 u64 tsf) 4054 { 4055 u32 reg0, reg1, tsf32l; 4056 struct ieee80211_channel *ch; 4057 struct pulse_event pe; 4058 struct radar_detector_specs rs; 4059 u64 tsf64; 4060 u8 rssi, width; 4061 struct ath10k_radar_found_info *radar_info; 4062 4063 reg0 = __le32_to_cpu(rr->reg0); 4064 reg1 = __le32_to_cpu(rr->reg1); 4065 4066 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4067 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", 4068 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), 4069 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), 4070 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), 4071 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); 4072 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4073 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", 4074 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), 4075 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), 4076 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), 4077 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), 4078 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); 4079 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4080 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", 4081 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), 4082 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); 4083 4084 if (!ar->dfs_detector) 4085 return; 4086 4087 spin_lock_bh(&ar->data_lock); 4088 ch = ar->rx_channel; 4089 4090 /* fetch target operating channel during channel change */ 4091 if (!ch) 4092 ch = ar->tgt_oper_chan; 4093 4094 spin_unlock_bh(&ar->data_lock); 4095 4096 if (!ch) { 4097 ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n"); 4098 goto radar_detected; 4099 } 4100 4101 /* report event to DFS pattern detector */ 4102 tsf32l = phyerr->tsf_timestamp; 4103 tsf64 = tsf & (~0xFFFFFFFFULL); 4104 tsf64 |= tsf32l; 4105 4106 width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); 4107 rssi = phyerr->rssi_combined; 4108 4109 /* hardware store this as 8 bit signed value, 4110 * set to zero if negative number 4111 */ 4112 if (rssi & 0x80) 4113 rssi = 0; 4114 4115 pe.ts = tsf64; 4116 pe.freq = ch->center_freq; 4117 pe.width = width; 4118 pe.rssi = rssi; 4119 pe.chirp = 
(MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0); 4120 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4121 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", 4122 pe.freq, pe.width, pe.rssi, pe.ts); 4123 4124 ATH10K_DFS_STAT_INC(ar, pulses_detected); 4125 4126 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) { 4127 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4128 "dfs no pulse pattern detected, yet\n"); 4129 return; 4130 } 4131 4132 if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) && 4133 ar->dfs_detector->region == NL80211_DFS_FCC) { 4134 /* Consecutive radar indications need not be 4135 * sent to the firmware until we get confirmation 4136 * for the previous detected radar. 4137 */ 4138 spin_lock_bh(&ar->data_lock); 4139 if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) { 4140 spin_unlock_bh(&ar->data_lock); 4141 return; 4142 } 4143 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS; 4144 radar_info = &ar->last_radar_info; 4145 4146 radar_info->pri_min = rs.pri_min; 4147 radar_info->pri_max = rs.pri_max; 4148 radar_info->width_min = rs.width_min; 4149 radar_info->width_max = rs.width_max; 4150 /*TODO Find sidx_min and sidx_max */ 4151 radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); 4152 radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); 4153 4154 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4155 "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", 4156 radar_info->pri_min, radar_info->pri_max, 4157 radar_info->width_min, radar_info->width_max, 4158 radar_info->sidx_min, radar_info->sidx_max); 4159 ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work); 4160 spin_unlock_bh(&ar->data_lock); 4161 return; 4162 } 4163 4164 radar_detected: 4165 ath10k_radar_detected(ar); 4166 } 4167 4168 static int ath10k_dfs_fft_report(struct ath10k *ar, 4169 struct wmi_phyerr_ev_arg *phyerr, 4170 const struct phyerr_fft_report *fftr, 4171 u64 tsf) 4172 { 4173 u32 reg0, reg1; 4174 u8 rssi, peak_mag; 4175 4176 reg0 = __le32_to_cpu(fftr->reg0); 4177 reg1 = __le32_to_cpu(fftr->reg1); 4178 rssi = phyerr->rssi_combined; 4179 4180 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4181 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", 4182 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), 4183 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), 4184 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), 4185 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); 4186 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4187 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", 4188 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), 4189 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), 4190 MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG), 4191 MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB)); 4192 4193 peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); 4194 4195 /* false event detection */ 4196 if (rssi == DFS_RSSI_POSSIBLY_FALSE && 4197 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { 4198 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); 4199 ATH10K_DFS_STAT_INC(ar, pulses_discarded); 4200 return -EINVAL; 4201 } 4202 4203 return 0; 4204 } 4205 4206 void ath10k_wmi_event_dfs(struct ath10k *ar, 4207 struct wmi_phyerr_ev_arg *phyerr, 4208 u64 tsf) 4209 { 4210 int buf_len, tlv_len, res, i = 0; 4211 const struct phyerr_tlv *tlv; 4212 const struct phyerr_radar_report *rr; 4213 const struct phyerr_fft_report *fftr; 4214 const u8 *tlv_buf; 4215 4216 buf_len = phyerr->buf_len; 4217 
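/* Editorial note (added): buf_len covers only the TLV payload that follows
 * the phyerr header; the loop below walks it one TLV at a time,
 * bounds-checking each TLV header and sub-report before dispatching it to
 * the radar or FFT handlers.
 */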
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4218 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", 4219 phyerr->phy_err_code, phyerr->rssi_combined, 4220 phyerr->tsf_timestamp, tsf, buf_len); 4221 4222 /* Skip event if DFS disabled */ 4223 if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) 4224 return; 4225 4226 ATH10K_DFS_STAT_INC(ar, pulses_total); 4227 4228 while (i < buf_len) { 4229 if (i + sizeof(*tlv) > buf_len) { 4230 ath10k_warn(ar, "too short buf for tlv header (%d)\n", 4231 i); 4232 return; 4233 } 4234 4235 tlv = (struct phyerr_tlv *)&phyerr->buf[i]; 4236 tlv_len = __le16_to_cpu(tlv->len); 4237 tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; 4238 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4239 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", 4240 tlv_len, tlv->tag, tlv->sig); 4241 4242 switch (tlv->tag) { 4243 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: 4244 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { 4245 ath10k_warn(ar, "too short radar pulse summary (%d)\n", 4246 i); 4247 return; 4248 } 4249 4250 rr = (struct phyerr_radar_report *)tlv_buf; 4251 ath10k_dfs_radar_report(ar, phyerr, rr, tsf); 4252 break; 4253 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 4254 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { 4255 ath10k_warn(ar, "too short fft report (%d)\n", 4256 i); 4257 return; 4258 } 4259 4260 fftr = (struct phyerr_fft_report *)tlv_buf; 4261 res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf); 4262 if (res) 4263 return; 4264 break; 4265 } 4266 4267 i += sizeof(*tlv) + tlv_len; 4268 } 4269 } 4270 4271 void ath10k_wmi_event_spectral_scan(struct ath10k *ar, 4272 struct wmi_phyerr_ev_arg *phyerr, 4273 u64 tsf) 4274 { 4275 int buf_len, tlv_len, res, i = 0; 4276 struct phyerr_tlv *tlv; 4277 const void *tlv_buf; 4278 const struct phyerr_fft_report *fftr; 4279 size_t fftr_len; 4280 4281 buf_len = phyerr->buf_len; 4282 4283 while (i < buf_len) { 4284 if (i + sizeof(*tlv) > buf_len) { 4285 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n", 4286 i); 4287 return; 4288 } 4289 4290 tlv = (struct phyerr_tlv *)&phyerr->buf[i]; 4291 tlv_len = __le16_to_cpu(tlv->len); 4292 tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; 4293 4294 if (i + sizeof(*tlv) + tlv_len > buf_len) { 4295 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", 4296 i); 4297 return; 4298 } 4299 4300 switch (tlv->tag) { 4301 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 4302 if (sizeof(*fftr) > tlv_len) { 4303 ath10k_warn(ar, "failed to parse fft report at byte %d\n", 4304 i); 4305 return; 4306 } 4307 4308 fftr_len = tlv_len - sizeof(*fftr); 4309 fftr = tlv_buf; 4310 res = ath10k_spectral_process_fft(ar, phyerr, 4311 fftr, fftr_len, 4312 tsf); 4313 if (res < 0) { 4314 ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n", 4315 res); 4316 return; 4317 } 4318 break; 4319 } 4320 4321 i += sizeof(*tlv) + tlv_len; 4322 } 4323 } 4324 4325 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar, 4326 struct sk_buff *skb, 4327 struct wmi_phyerr_hdr_arg *arg) 4328 { 4329 struct wmi_phyerr_event *ev = (void *)skb->data; 4330 4331 if (skb->len < sizeof(*ev)) 4332 return -EPROTO; 4333 4334 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); 4335 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); 4336 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); 4337 arg->buf_len = skb->len - sizeof(*ev); 4338 arg->phyerrs = ev->phyerrs; 4339 4340 return 0; 4341 } 4342 4343 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar, 4344 struct sk_buff *skb, 4345 struct wmi_phyerr_hdr_arg *arg) 4346 { 4347 struct 
wmi_10_4_phyerr_event *ev = (void *)skb->data; 4348 4349 if (skb->len < sizeof(*ev)) 4350 return -EPROTO; 4351 4352 /* 10.4 firmware always reports only one phyerr */ 4353 arg->num_phyerrs = 1; 4354 4355 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); 4356 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); 4357 arg->buf_len = skb->len; 4358 arg->phyerrs = skb->data; 4359 4360 return 0; 4361 } 4362 4363 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, 4364 const void *phyerr_buf, 4365 int left_len, 4366 struct wmi_phyerr_ev_arg *arg) 4367 { 4368 const struct wmi_phyerr *phyerr = phyerr_buf; 4369 int i; 4370 4371 if (left_len < sizeof(*phyerr)) { 4372 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", 4373 left_len, sizeof(*phyerr)); 4374 return -EINVAL; 4375 } 4376 4377 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); 4378 arg->freq1 = __le16_to_cpu(phyerr->freq1); 4379 arg->freq2 = __le16_to_cpu(phyerr->freq2); 4380 arg->rssi_combined = phyerr->rssi_combined; 4381 arg->chan_width_mhz = phyerr->chan_width_mhz; 4382 arg->buf_len = __le32_to_cpu(phyerr->buf_len); 4383 arg->buf = phyerr->buf; 4384 arg->hdr_len = sizeof(*phyerr); 4385 4386 for (i = 0; i < 4; i++) 4387 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); 4388 4389 switch (phyerr->phy_err_code) { 4390 case PHY_ERROR_GEN_SPECTRAL_SCAN: 4391 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; 4392 break; 4393 case PHY_ERROR_GEN_FALSE_RADAR_EXT: 4394 arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT; 4395 break; 4396 case PHY_ERROR_GEN_RADAR: 4397 arg->phy_err_code = PHY_ERROR_RADAR; 4398 break; 4399 default: 4400 arg->phy_err_code = PHY_ERROR_UNKNOWN; 4401 break; 4402 } 4403 4404 return 0; 4405 } 4406 4407 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar, 4408 const void *phyerr_buf, 4409 int left_len, 4410 struct wmi_phyerr_ev_arg *arg) 4411 { 4412 const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf; 4413 u32 phy_err_mask; 4414 int i; 4415 4416 if (left_len < sizeof(*phyerr)) { 4417 ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", 4418 left_len, sizeof(*phyerr)); 4419 return -EINVAL; 4420 } 4421 4422 arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); 4423 arg->freq1 = __le16_to_cpu(phyerr->freq1); 4424 arg->freq2 = __le16_to_cpu(phyerr->freq2); 4425 arg->rssi_combined = phyerr->rssi_combined; 4426 arg->chan_width_mhz = phyerr->chan_width_mhz; 4427 arg->buf_len = __le32_to_cpu(phyerr->buf_len); 4428 arg->buf = phyerr->buf; 4429 arg->hdr_len = sizeof(*phyerr); 4430 4431 for (i = 0; i < 4; i++) 4432 arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); 4433 4434 phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]); 4435 4436 if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK) 4437 arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; 4438 else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK) 4439 arg->phy_err_code = PHY_ERROR_RADAR; 4440 else 4441 arg->phy_err_code = PHY_ERROR_UNKNOWN; 4442 4443 return 0; 4444 } 4445 4446 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) 4447 { 4448 struct wmi_phyerr_hdr_arg hdr_arg = {}; 4449 struct wmi_phyerr_ev_arg phyerr_arg = {}; 4450 const void *phyerr; 4451 u32 count, i, buf_len, phy_err_code; 4452 u64 tsf; 4453 int left_len, ret; 4454 4455 ATH10K_DFS_STAT_INC(ar, phy_errors); 4456 4457 ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg); 4458 if (ret) { 4459 ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret); 4460 return; 4461 } 4462 4463 /* Check number of included events */ 4464 count = hdr_arg.num_phyerrs; 4465 
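/* Editorial note (added): the event header splits the 64-bit TSF into two
 * 32-bit words (tsf_u32:tsf_l32); it is reassembled below before iterating
 * over the per-error records, with left_len tracking how much of the
 * buffer remains to be parsed.
 */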
4466 left_len = hdr_arg.buf_len; 4467 4468 tsf = hdr_arg.tsf_u32; 4469 tsf <<= 32; 4470 tsf |= hdr_arg.tsf_l32; 4471 4472 ath10k_dbg(ar, ATH10K_DBG_WMI, 4473 "wmi event phyerr count %d tsf64 0x%llX\n", 4474 count, tsf); 4475 4476 phyerr = hdr_arg.phyerrs; 4477 for (i = 0; i < count; i++) { 4478 ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg); 4479 if (ret) { 4480 ath10k_warn(ar, "failed to parse phyerr event (%d)\n", 4481 i); 4482 return; 4483 } 4484 4485 left_len -= phyerr_arg.hdr_len; 4486 buf_len = phyerr_arg.buf_len; 4487 phy_err_code = phyerr_arg.phy_err_code; 4488 4489 if (left_len < buf_len) { 4490 ath10k_warn(ar, "single event (%d) wrong buf len\n", i); 4491 return; 4492 } 4493 4494 left_len -= buf_len; 4495 4496 switch (phy_err_code) { 4497 case PHY_ERROR_RADAR: 4498 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); 4499 break; 4500 case PHY_ERROR_SPECTRAL_SCAN: 4501 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); 4502 break; 4503 case PHY_ERROR_FALSE_RADAR_EXT: 4504 ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); 4505 ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); 4506 break; 4507 default: 4508 break; 4509 } 4510 4511 phyerr = phyerr + phyerr_arg.hdr_len + buf_len; 4512 } 4513 } 4514 4515 static int 4516 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb, 4517 struct wmi_dfs_status_ev_arg *arg) 4518 { 4519 struct wmi_dfs_status_ev_arg *ev = (void *)skb->data; 4520 4521 if (skb->len < sizeof(*ev)) 4522 return -EPROTO; 4523 4524 arg->status = ev->status; 4525 4526 return 0; 4527 } 4528 4529 static void 4530 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb) 4531 { 4532 struct wmi_dfs_status_ev_arg status_arg = {}; 4533 int ret; 4534 4535 ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg); 4536 4537 if (ret) { 4538 ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret); 4539 return; 4540 } 4541 4542 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, 4543 "dfs status event received from fw: %d\n", 4544 status_arg.status); 4545 4546 /* Even in case of radar detection failure we follow the same 4547 * behaviour as if radar is detected i.e to switch to a different 4548 * channel. 
4549 */ 4550 if (status_arg.status == WMI_HW_RADAR_DETECTED || 4551 status_arg.status == WMI_RADAR_DETECTION_FAIL) 4552 ath10k_radar_detected(ar); 4553 complete(&ar->wmi.radar_confirm); 4554 } 4555 4556 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) 4557 { 4558 struct wmi_roam_ev_arg arg = {}; 4559 int ret; 4560 u32 vdev_id; 4561 u32 reason; 4562 s32 rssi; 4563 4564 ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg); 4565 if (ret) { 4566 ath10k_warn(ar, "failed to parse roam event: %d\n", ret); 4567 return; 4568 } 4569 4570 vdev_id = __le32_to_cpu(arg.vdev_id); 4571 reason = __le32_to_cpu(arg.reason); 4572 rssi = __le32_to_cpu(arg.rssi); 4573 rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; 4574 4575 ath10k_dbg(ar, ATH10K_DBG_WMI, 4576 "wmi roam event vdev %u reason 0x%08x rssi %d\n", 4577 vdev_id, reason, rssi); 4578 4579 if (reason >= WMI_ROAM_REASON_MAX) 4580 ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n", 4581 reason, vdev_id); 4582 4583 switch (reason) { 4584 case WMI_ROAM_REASON_BEACON_MISS: 4585 ath10k_mac_handle_beacon_miss(ar, vdev_id); 4586 break; 4587 case WMI_ROAM_REASON_BETTER_AP: 4588 case WMI_ROAM_REASON_LOW_RSSI: 4589 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 4590 case WMI_ROAM_REASON_HO_FAILED: 4591 ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n", 4592 reason, vdev_id); 4593 break; 4594 } 4595 } 4596 4597 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb) 4598 { 4599 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 4600 } 4601 4602 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb) 4603 { 4604 char buf[101], c; 4605 int i; 4606 4607 for (i = 0; i < sizeof(buf) - 1; i++) { 4608 if (i >= skb->len) 4609 break; 4610 4611 c = skb->data[i]; 4612 4613 if (c == '\0') 4614 break; 4615 4616 if (isascii(c) && isprint(c)) 4617 buf[i] = c; 4618 else 4619 buf[i] = '.'; 4620 } 4621 4622 if (i == sizeof(buf) - 1) 4623 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len); 4624 4625 /* for some reason the debug prints end with \n, remove that */ 4626 if (skb->data[i - 1] == '\n') 4627 i--; 4628 4629 /* the last byte is always reserved for the null character */ 4630 buf[i] = '\0'; 4631 4632 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); 4633 } 4634 4635 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 4636 { 4637 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 4638 } 4639 4640 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb) 4641 { 4642 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 4643 } 4644 4645 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 4646 struct sk_buff *skb) 4647 { 4648 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 4649 } 4650 4651 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 4652 struct sk_buff *skb) 4653 { 4654 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 4655 } 4656 4657 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb) 4658 { 4659 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 4660 } 4661 4662 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) 4663 { 4664 struct wmi_wow_ev_arg ev = {}; 4665 int ret; 4666 4667 complete(&ar->wow.wakeup_completed); 4668 4669 ret = ath10k_wmi_pull_wow_event(ar, skb, &ev); 4670 if (ret) { 4671 ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret); 4672 
return; 4673 } 4674 4675 ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n", 4676 wow_reason(ev.wake_reason)); 4677 } 4678 4679 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) 4680 { 4681 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 4682 } 4683 4684 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar, 4685 struct wmi_pdev_tpc_config_event *ev, 4686 u32 rate_idx, u32 num_chains, 4687 u32 rate_code, u8 type) 4688 { 4689 u8 tpc, num_streams, preamble, ch, stm_idx; 4690 4691 num_streams = ATH10K_HW_NSS(rate_code); 4692 preamble = ATH10K_HW_PREAMBLE(rate_code); 4693 ch = num_chains - 1; 4694 4695 tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]); 4696 4697 if (__le32_to_cpu(ev->num_tx_chain) <= 1) 4698 goto out; 4699 4700 if (preamble == WMI_RATE_PREAMBLE_CCK) 4701 goto out; 4702 4703 stm_idx = num_streams - 1; 4704 if (num_chains <= num_streams) 4705 goto out; 4706 4707 switch (type) { 4708 case WMI_TPC_TABLE_TYPE_STBC: 4709 tpc = min_t(u8, tpc, 4710 ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]); 4711 break; 4712 case WMI_TPC_TABLE_TYPE_TXBF: 4713 tpc = min_t(u8, tpc, 4714 ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]); 4715 break; 4716 case WMI_TPC_TABLE_TYPE_CDD: 4717 tpc = min_t(u8, tpc, 4718 ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]); 4719 break; 4720 default: 4721 ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type); 4722 tpc = 0; 4723 break; 4724 } 4725 4726 out: 4727 return tpc; 4728 } 4729 4730 static void ath10k_tpc_config_disp_tables(struct ath10k *ar, 4731 struct wmi_pdev_tpc_config_event *ev, 4732 struct ath10k_tpc_stats *tpc_stats, 4733 u8 *rate_code, u16 *pream_table, u8 type) 4734 { 4735 u32 i, j, pream_idx, flags; 4736 u8 tpc[WMI_TPC_TX_N_CHAIN]; 4737 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; 4738 char buff[WMI_TPC_BUF_SIZE]; 4739 4740 flags = __le32_to_cpu(ev->flags); 4741 4742 switch (type) { 4743 case WMI_TPC_TABLE_TYPE_CDD: 4744 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { 4745 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); 4746 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 4747 return; 4748 } 4749 break; 4750 case WMI_TPC_TABLE_TYPE_STBC: 4751 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { 4752 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); 4753 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 4754 return; 4755 } 4756 break; 4757 case WMI_TPC_TABLE_TYPE_TXBF: 4758 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { 4759 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); 4760 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 4761 return; 4762 } 4763 break; 4764 default: 4765 ath10k_dbg(ar, ATH10K_DBG_WMI, 4766 "invalid table type in wmi tpc event: %d\n", type); 4767 return; 4768 } 4769 4770 pream_idx = 0; 4771 for (i = 0; i < tpc_stats->rate_max; i++) { 4772 memset(tpc_value, 0, sizeof(tpc_value)); 4773 memset(buff, 0, sizeof(buff)); 4774 if (i == pream_table[pream_idx]) 4775 pream_idx++; 4776 4777 for (j = 0; j < tpc_stats->num_tx_chain; j++) { 4778 tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, 4779 rate_code[i], 4780 type); 4781 snprintf(buff, sizeof(buff), "%8d ", tpc[j]); 4782 strlcat(tpc_value, buff, sizeof(tpc_value)); 4783 } 4784 tpc_stats->tpc_table[type].pream_idx[i] = pream_idx; 4785 tpc_stats->tpc_table[type].rate_code[i] = rate_code[i]; 4786 memcpy(tpc_stats->tpc_table[type].tpc_value[i], 4787 tpc_value, sizeof(tpc_value)); 4788 } 4789 } 4790 4791 void ath10k_wmi_tpc_config_get_rate_code(u8 
*rate_code, u16 *pream_table, 4792 u32 num_tx_chain) 4793 { 4794 u32 i, j, pream_idx; 4795 u8 rate_idx; 4796 4797 /* Create the rate code table based on the chains supported */ 4798 rate_idx = 0; 4799 pream_idx = 0; 4800 4801 /* Fill CCK rate code */ 4802 for (i = 0; i < 4; i++) { 4803 rate_code[rate_idx] = 4804 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK); 4805 rate_idx++; 4806 } 4807 pream_table[pream_idx] = rate_idx; 4808 pream_idx++; 4809 4810 /* Fill OFDM rate code */ 4811 for (i = 0; i < 8; i++) { 4812 rate_code[rate_idx] = 4813 ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM); 4814 rate_idx++; 4815 } 4816 pream_table[pream_idx] = rate_idx; 4817 pream_idx++; 4818 4819 /* Fill HT20 rate code */ 4820 for (i = 0; i < num_tx_chain; i++) { 4821 for (j = 0; j < 8; j++) { 4822 rate_code[rate_idx] = 4823 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); 4824 rate_idx++; 4825 } 4826 } 4827 pream_table[pream_idx] = rate_idx; 4828 pream_idx++; 4829 4830 /* Fill HT40 rate code */ 4831 for (i = 0; i < num_tx_chain; i++) { 4832 for (j = 0; j < 8; j++) { 4833 rate_code[rate_idx] = 4834 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); 4835 rate_idx++; 4836 } 4837 } 4838 pream_table[pream_idx] = rate_idx; 4839 pream_idx++; 4840 4841 /* Fill VHT20 rate code */ 4842 for (i = 0; i < num_tx_chain; i++) { 4843 for (j = 0; j < 10; j++) { 4844 rate_code[rate_idx] = 4845 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); 4846 rate_idx++; 4847 } 4848 } 4849 pream_table[pream_idx] = rate_idx; 4850 pream_idx++; 4851 4852 /* Fill VHT40 rate code */ 4853 for (i = 0; i < num_tx_chain; i++) { 4854 for (j = 0; j < 10; j++) { 4855 rate_code[rate_idx] = 4856 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); 4857 rate_idx++; 4858 } 4859 } 4860 pream_table[pream_idx] = rate_idx; 4861 pream_idx++; 4862 4863 /* Fill VHT80 rate code */ 4864 for (i = 0; i < num_tx_chain; i++) { 4865 for (j = 0; j < 10; j++) { 4866 rate_code[rate_idx] = 4867 ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); 4868 rate_idx++; 4869 } 4870 } 4871 pream_table[pream_idx] = rate_idx; 4872 pream_idx++; 4873 4874 rate_code[rate_idx++] = 4875 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); 4876 rate_code[rate_idx++] = 4877 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); 4878 rate_code[rate_idx++] = 4879 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); 4880 rate_code[rate_idx++] = 4881 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); 4882 rate_code[rate_idx++] = 4883 ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); 4884 4885 pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END; 4886 } 4887 4888 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) 4889 { 4890 u32 num_tx_chain, rate_max; 4891 u8 rate_code[WMI_TPC_RATE_MAX]; 4892 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; 4893 struct wmi_pdev_tpc_config_event *ev; 4894 struct ath10k_tpc_stats *tpc_stats; 4895 4896 ev = (struct wmi_pdev_tpc_config_event *)skb->data; 4897 4898 num_tx_chain = __le32_to_cpu(ev->num_tx_chain); 4899 4900 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { 4901 ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n", 4902 num_tx_chain, WMI_TPC_TX_N_CHAIN); 4903 return; 4904 } 4905 4906 rate_max = __le32_to_cpu(ev->rate_max); 4907 if (rate_max > WMI_TPC_RATE_MAX) { 4908 ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", 4909 rate_max, WMI_TPC_RATE_MAX); 4910 rate_max = WMI_TPC_RATE_MAX; 4911 } 4912 4913 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); 4914 if (!tpc_stats) 4915 return; 4916 4917 
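	/* Build the rate code and preamble boundary tables for the
	 * reported number of tx chains; the per-rate TPC display tables
	 * filled in below are indexed through them.
	 */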
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, 4918 num_tx_chain); 4919 4920 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); 4921 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); 4922 tpc_stats->ctl = __le32_to_cpu(ev->ctl); 4923 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); 4924 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); 4925 tpc_stats->twice_antenna_reduction = 4926 __le32_to_cpu(ev->twice_antenna_reduction); 4927 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); 4928 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); 4929 tpc_stats->num_tx_chain = num_tx_chain; 4930 tpc_stats->rate_max = rate_max; 4931 4932 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, 4933 rate_code, pream_table, 4934 WMI_TPC_TABLE_TYPE_CDD); 4935 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, 4936 rate_code, pream_table, 4937 WMI_TPC_TABLE_TYPE_STBC); 4938 ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, 4939 rate_code, pream_table, 4940 WMI_TPC_TABLE_TYPE_TXBF); 4941 4942 ath10k_debug_tpc_stats_process(ar, tpc_stats); 4943 4944 ath10k_dbg(ar, ATH10K_DBG_WMI, 4945 "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n", 4946 __le32_to_cpu(ev->chan_freq), 4947 __le32_to_cpu(ev->phy_mode), 4948 __le32_to_cpu(ev->ctl), 4949 __le32_to_cpu(ev->reg_domain), 4950 a_sle32_to_cpu(ev->twice_antenna_gain), 4951 __le32_to_cpu(ev->twice_antenna_reduction), 4952 __le32_to_cpu(ev->power_limit), 4953 __le32_to_cpu(ev->twice_max_rd_power) / 2, 4954 __le32_to_cpu(ev->num_tx_chain), 4955 __le32_to_cpu(ev->rate_max)); 4956 } 4957 4958 static u8 4959 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar, 4960 struct wmi_pdev_tpc_final_table_event *ev, 4961 u32 rate_idx, u32 num_chains, 4962 u32 rate_code, u8 type, u32 pream_idx) 4963 { 4964 u8 tpc, num_streams, preamble, ch, stm_idx; 4965 s8 pow_agcdd, pow_agstbc, pow_agtxbf; 4966 int pream; 4967 4968 num_streams = ATH10K_HW_NSS(rate_code); 4969 preamble = ATH10K_HW_PREAMBLE(rate_code); 4970 ch = num_chains - 1; 4971 stm_idx = num_streams - 1; 4972 pream = -1; 4973 4974 if (__le32_to_cpu(ev->chan_freq) <= 2483) { 4975 switch (pream_idx) { 4976 case WMI_TPC_PREAM_2GHZ_CCK: 4977 pream = 0; 4978 break; 4979 case WMI_TPC_PREAM_2GHZ_OFDM: 4980 pream = 1; 4981 break; 4982 case WMI_TPC_PREAM_2GHZ_HT20: 4983 case WMI_TPC_PREAM_2GHZ_VHT20: 4984 pream = 2; 4985 break; 4986 case WMI_TPC_PREAM_2GHZ_HT40: 4987 case WMI_TPC_PREAM_2GHZ_VHT40: 4988 pream = 3; 4989 break; 4990 case WMI_TPC_PREAM_2GHZ_VHT80: 4991 pream = 4; 4992 break; 4993 default: 4994 pream = -1; 4995 break; 4996 } 4997 } 4998 4999 if (__le32_to_cpu(ev->chan_freq) >= 5180) { 5000 switch (pream_idx) { 5001 case WMI_TPC_PREAM_5GHZ_OFDM: 5002 pream = 0; 5003 break; 5004 case WMI_TPC_PREAM_5GHZ_HT20: 5005 case WMI_TPC_PREAM_5GHZ_VHT20: 5006 pream = 1; 5007 break; 5008 case WMI_TPC_PREAM_5GHZ_HT40: 5009 case WMI_TPC_PREAM_5GHZ_VHT40: 5010 pream = 2; 5011 break; 5012 case WMI_TPC_PREAM_5GHZ_VHT80: 5013 pream = 3; 5014 break; 5015 case WMI_TPC_PREAM_5GHZ_HTCUP: 5016 pream = 4; 5017 break; 5018 default: 5019 pream = -1; 5020 break; 5021 } 5022 } 5023 5024 if (pream == -1) { 5025 ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n", 5026 pream_idx, __le32_to_cpu(ev->chan_freq)); 5027 tpc = 0; 5028 goto out; 5029 } 5030 5031 if (pream == 4) 5032 tpc = min_t(u8, ev->rates_array[rate_idx], 5033 ev->max_reg_allow_pow[ch]); 5034 else 5035 tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx], 
5036 ev->max_reg_allow_pow[ch]), 5037 ev->ctl_power_table[0][pream][stm_idx]); 5038 5039 if (__le32_to_cpu(ev->num_tx_chain) <= 1) 5040 goto out; 5041 5042 if (preamble == WMI_RATE_PREAMBLE_CCK) 5043 goto out; 5044 5045 if (num_chains <= num_streams) 5046 goto out; 5047 5048 switch (type) { 5049 case WMI_TPC_TABLE_TYPE_STBC: 5050 pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]; 5051 if (pream == 4) 5052 tpc = min_t(u8, tpc, pow_agstbc); 5053 else 5054 tpc = min_t(u8, min_t(u8, tpc, pow_agstbc), 5055 ev->ctl_power_table[0][pream][stm_idx]); 5056 break; 5057 case WMI_TPC_TABLE_TYPE_TXBF: 5058 pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]; 5059 if (pream == 4) 5060 tpc = min_t(u8, tpc, pow_agtxbf); 5061 else 5062 tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf), 5063 ev->ctl_power_table[1][pream][stm_idx]); 5064 break; 5065 case WMI_TPC_TABLE_TYPE_CDD: 5066 pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]; 5067 if (pream == 4) 5068 tpc = min_t(u8, tpc, pow_agcdd); 5069 else 5070 tpc = min_t(u8, min_t(u8, tpc, pow_agcdd), 5071 ev->ctl_power_table[0][pream][stm_idx]); 5072 break; 5073 default: 5074 ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type); 5075 tpc = 0; 5076 break; 5077 } 5078 5079 out: 5080 return tpc; 5081 } 5082 5083 static void 5084 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, 5085 struct wmi_pdev_tpc_final_table_event *ev, 5086 struct ath10k_tpc_stats_final *tpc_stats, 5087 u8 *rate_code, u16 *pream_table, u8 type) 5088 { 5089 u32 i, j, pream_idx, flags; 5090 u8 tpc[WMI_TPC_TX_N_CHAIN]; 5091 char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; 5092 char buff[WMI_TPC_BUF_SIZE]; 5093 5094 flags = __le32_to_cpu(ev->flags); 5095 5096 switch (type) { 5097 case WMI_TPC_TABLE_TYPE_CDD: 5098 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { 5099 ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); 5100 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 5101 return; 5102 } 5103 break; 5104 case WMI_TPC_TABLE_TYPE_STBC: 5105 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { 5106 ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); 5107 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 5108 return; 5109 } 5110 break; 5111 case WMI_TPC_TABLE_TYPE_TXBF: 5112 if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { 5113 ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); 5114 tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; 5115 return; 5116 } 5117 break; 5118 default: 5119 ath10k_dbg(ar, ATH10K_DBG_WMI, 5120 "invalid table type in wmi tpc event: %d\n", type); 5121 return; 5122 } 5123 5124 pream_idx = 0; 5125 for (i = 0; i < tpc_stats->rate_max; i++) { 5126 memset(tpc_value, 0, sizeof(tpc_value)); 5127 memset(buff, 0, sizeof(buff)); 5128 if (i == pream_table[pream_idx]) 5129 pream_idx++; 5130 5131 for (j = 0; j < tpc_stats->num_tx_chain; j++) { 5132 tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, 5133 rate_code[i], 5134 type, pream_idx); 5135 snprintf(buff, sizeof(buff), "%8d ", tpc[j]); 5136 strlcat(tpc_value, buff, sizeof(tpc_value)); 5137 } 5138 tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx; 5139 tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i]; 5140 memcpy(tpc_stats->tpc_table_final[type].tpc_value[i], 5141 tpc_value, sizeof(tpc_value)); 5142 } 5143 } 5144 5145 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) 5146 { 5147 u32 num_tx_chain, rate_max; 5148 u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; 5149 u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; 5150 struct 
wmi_pdev_tpc_final_table_event *ev; 5151 struct ath10k_tpc_stats_final *tpc_stats; 5152 5153 ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; 5154 5155 num_tx_chain = __le32_to_cpu(ev->num_tx_chain); 5156 if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { 5157 ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", 5158 num_tx_chain, WMI_TPC_TX_N_CHAIN); 5159 return; 5160 } 5161 5162 rate_max = __le32_to_cpu(ev->rate_max); 5163 if (rate_max > WMI_TPC_FINAL_RATE_MAX) { 5164 ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", 5165 rate_max, WMI_TPC_FINAL_RATE_MAX); 5166 rate_max = WMI_TPC_FINAL_RATE_MAX; 5167 } 5168 5169 tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); 5170 if (!tpc_stats) 5171 return; 5172 5173 ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, 5174 num_tx_chain); 5175 5176 tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); 5177 tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); 5178 tpc_stats->ctl = __le32_to_cpu(ev->ctl); 5179 tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); 5180 tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); 5181 tpc_stats->twice_antenna_reduction = 5182 __le32_to_cpu(ev->twice_antenna_reduction); 5183 tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); 5184 tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); 5185 tpc_stats->num_tx_chain = num_tx_chain; 5186 tpc_stats->rate_max = rate_max; 5187 5188 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, 5189 rate_code, pream_table, 5190 WMI_TPC_TABLE_TYPE_CDD); 5191 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, 5192 rate_code, pream_table, 5193 WMI_TPC_TABLE_TYPE_STBC); 5194 ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, 5195 rate_code, pream_table, 5196 WMI_TPC_TABLE_TYPE_TXBF); 5197 5198 ath10k_debug_tpc_stats_final_process(ar, tpc_stats); 5199 5200 ath10k_dbg(ar, ATH10K_DBG_WMI, 5201 "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n", 5202 __le32_to_cpu(ev->chan_freq), 5203 __le32_to_cpu(ev->phy_mode), 5204 __le32_to_cpu(ev->ctl), 5205 __le32_to_cpu(ev->reg_domain), 5206 a_sle32_to_cpu(ev->twice_antenna_gain), 5207 __le32_to_cpu(ev->twice_antenna_reduction), 5208 __le32_to_cpu(ev->power_limit), 5209 __le32_to_cpu(ev->twice_max_rd_power) / 2, 5210 __le32_to_cpu(ev->num_tx_chain), 5211 __le32_to_cpu(ev->rate_max)); 5212 } 5213 5214 static void 5215 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb) 5216 { 5217 struct wmi_tdls_peer_event *ev; 5218 struct ath10k_peer *peer; 5219 struct ath10k_vif *arvif; 5220 int vdev_id; 5221 int peer_status; 5222 int peer_reason; 5223 u8 reason; 5224 5225 if (skb->len < sizeof(*ev)) { 5226 ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n", 5227 skb->len); 5228 return; 5229 } 5230 5231 ev = (struct wmi_tdls_peer_event *)skb->data; 5232 vdev_id = __le32_to_cpu(ev->vdev_id); 5233 peer_status = __le32_to_cpu(ev->peer_status); 5234 peer_reason = __le32_to_cpu(ev->peer_reason); 5235 5236 spin_lock_bh(&ar->data_lock); 5237 peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr); 5238 spin_unlock_bh(&ar->data_lock); 5239 5240 if (!peer) { 5241 ath10k_warn(ar, "failed to find peer entry for %pM\n", 5242 ev->peer_macaddr.addr); 5243 return; 5244 } 5245 5246 switch (peer_status) { 5247 case WMI_TDLS_SHOULD_TEARDOWN: 5248 switch (peer_reason) { 5249 case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: 5250 case 
WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: 5251 case WMI_TDLS_TEARDOWN_REASON_RSSI: 5252 reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE; 5253 break; 5254 default: 5255 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; 5256 break; 5257 } 5258 5259 arvif = ath10k_get_arvif(ar, vdev_id); 5260 if (!arvif) { 5261 ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n", 5262 vdev_id); 5263 return; 5264 } 5265 5266 ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr, 5267 NL80211_TDLS_TEARDOWN, reason, 5268 GFP_ATOMIC); 5269 5270 ath10k_dbg(ar, ATH10K_DBG_WMI, 5271 "received tdls teardown event for peer %pM reason %u\n", 5272 ev->peer_macaddr.addr, peer_reason); 5273 break; 5274 default: 5275 ath10k_dbg(ar, ATH10K_DBG_WMI, 5276 "received unknown tdls peer event %u\n", 5277 peer_status); 5278 break; 5279 } 5280 } 5281 5282 static void 5283 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb) 5284 { 5285 struct wmi_peer_sta_ps_state_chg_event *ev; 5286 struct ieee80211_sta *sta; 5287 struct ath10k_sta *arsta; 5288 u8 peer_addr[ETH_ALEN]; 5289 5290 lockdep_assert_held(&ar->data_lock); 5291 5292 ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data; 5293 ether_addr_copy(peer_addr, ev->peer_macaddr.addr); 5294 5295 rcu_read_lock(); 5296 5297 sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL); 5298 5299 if (!sta) { 5300 ath10k_warn(ar, "failed to find station entry %pM\n", 5301 peer_addr); 5302 goto exit; 5303 } 5304 5305 arsta = (struct ath10k_sta *)sta->drv_priv; 5306 arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state); 5307 5308 exit: 5309 rcu_read_unlock(); 5310 } 5311 5312 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) 5313 { 5314 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 5315 } 5316 5317 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb) 5318 { 5319 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 5320 } 5321 5322 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb) 5323 { 5324 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 5325 } 5326 5327 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb) 5328 { 5329 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 5330 } 5331 5332 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb) 5333 { 5334 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 5335 } 5336 5337 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 5338 struct sk_buff *skb) 5339 { 5340 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 5341 } 5342 5343 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb) 5344 { 5345 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 5346 } 5347 5348 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb) 5349 { 5350 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 5351 } 5352 5353 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) 5354 { 5355 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 5356 } 5357 5358 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, 5359 u32 num_units, u32 unit_len) 5360 { 5361 dma_addr_t paddr; 5362 u32 pool_size; 5363 int idx = ar->wmi.num_mem_chunks; 5364 void *vaddr; 5365 5366 pool_size = num_units * round_up(unit_len, 4); 5367 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, 
GFP_KERNEL); 5368 5369 if (!vaddr) 5370 return -ENOMEM; 5371 5372 ar->wmi.mem_chunks[idx].vaddr = vaddr; 5373 ar->wmi.mem_chunks[idx].paddr = paddr; 5374 ar->wmi.mem_chunks[idx].len = pool_size; 5375 ar->wmi.mem_chunks[idx].req_id = req_id; 5376 ar->wmi.num_mem_chunks++; 5377 5378 return num_units; 5379 } 5380 5381 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, 5382 u32 num_units, u32 unit_len) 5383 { 5384 int ret; 5385 5386 while (num_units) { 5387 ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len); 5388 if (ret < 0) 5389 return ret; 5390 5391 num_units -= ret; 5392 } 5393 5394 return 0; 5395 } 5396 5397 static bool 5398 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar, 5399 const struct wlan_host_mem_req **mem_reqs, 5400 u32 num_mem_reqs) 5401 { 5402 u32 req_id, num_units, unit_size, num_unit_info; 5403 u32 pool_size; 5404 int i, j; 5405 bool found; 5406 5407 if (ar->wmi.num_mem_chunks != num_mem_reqs) 5408 return false; 5409 5410 for (i = 0; i < num_mem_reqs; ++i) { 5411 req_id = __le32_to_cpu(mem_reqs[i]->req_id); 5412 num_units = __le32_to_cpu(mem_reqs[i]->num_units); 5413 unit_size = __le32_to_cpu(mem_reqs[i]->unit_size); 5414 num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info); 5415 5416 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { 5417 if (ar->num_active_peers) 5418 num_units = ar->num_active_peers + 1; 5419 else 5420 num_units = ar->max_num_peers + 1; 5421 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { 5422 num_units = ar->max_num_peers + 1; 5423 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { 5424 num_units = ar->max_num_vdevs + 1; 5425 } 5426 5427 found = false; 5428 for (j = 0; j < ar->wmi.num_mem_chunks; j++) { 5429 if (ar->wmi.mem_chunks[j].req_id == req_id) { 5430 pool_size = num_units * round_up(unit_size, 4); 5431 if (ar->wmi.mem_chunks[j].len == pool_size) { 5432 found = true; 5433 break; 5434 } 5435 } 5436 } 5437 if (!found) 5438 return false; 5439 } 5440 5441 return true; 5442 } 5443 5444 static int 5445 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, 5446 struct wmi_svc_rdy_ev_arg *arg) 5447 { 5448 struct wmi_service_ready_event *ev; 5449 size_t i, n; 5450 5451 if (skb->len < sizeof(*ev)) 5452 return -EPROTO; 5453 5454 ev = (void *)skb->data; 5455 skb_pull(skb, sizeof(*ev)); 5456 arg->min_tx_power = ev->hw_min_tx_power; 5457 arg->max_tx_power = ev->hw_max_tx_power; 5458 arg->ht_cap = ev->ht_cap_info; 5459 arg->vht_cap = ev->vht_cap_info; 5460 arg->vht_supp_mcs = ev->vht_supp_mcs; 5461 arg->sw_ver0 = ev->sw_version; 5462 arg->sw_ver1 = ev->sw_version_1; 5463 arg->phy_capab = ev->phy_capability; 5464 arg->num_rf_chains = ev->num_rf_chains; 5465 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; 5466 arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan; 5467 arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan; 5468 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan; 5469 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan; 5470 arg->num_mem_reqs = ev->num_mem_reqs; 5471 arg->service_map = ev->wmi_service_bitmap; 5472 arg->service_map_len = sizeof(ev->wmi_service_bitmap); 5473 5474 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), 5475 ARRAY_SIZE(arg->mem_reqs)); 5476 for (i = 0; i < n; i++) 5477 arg->mem_reqs[i] = &ev->mem_reqs[i]; 5478 5479 if (skb->len < 5480 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) 5481 return -EPROTO; 5482 5483 return 0; 5484 } 5485 5486 static int 5487 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, 
struct sk_buff *skb, 5488 struct wmi_svc_rdy_ev_arg *arg) 5489 { 5490 struct wmi_10x_service_ready_event *ev; 5491 int i, n; 5492 5493 if (skb->len < sizeof(*ev)) 5494 return -EPROTO; 5495 5496 ev = (void *)skb->data; 5497 skb_pull(skb, sizeof(*ev)); 5498 arg->min_tx_power = ev->hw_min_tx_power; 5499 arg->max_tx_power = ev->hw_max_tx_power; 5500 arg->ht_cap = ev->ht_cap_info; 5501 arg->vht_cap = ev->vht_cap_info; 5502 arg->vht_supp_mcs = ev->vht_supp_mcs; 5503 arg->sw_ver0 = ev->sw_version; 5504 arg->phy_capab = ev->phy_capability; 5505 arg->num_rf_chains = ev->num_rf_chains; 5506 arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; 5507 arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan; 5508 arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan; 5509 arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan; 5510 arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan; 5511 arg->num_mem_reqs = ev->num_mem_reqs; 5512 arg->service_map = ev->wmi_service_bitmap; 5513 arg->service_map_len = sizeof(ev->wmi_service_bitmap); 5514 5515 /* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have 5516 * different values. We would need a translation to handle that, 5517 * but as we don't currently need anything from sys_cap_info from 5518 * WMI interface (only from WMI-TLV) safest it to skip it. 5519 */ 5520 5521 n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), 5522 ARRAY_SIZE(arg->mem_reqs)); 5523 for (i = 0; i < n; i++) 5524 arg->mem_reqs[i] = &ev->mem_reqs[i]; 5525 5526 if (skb->len < 5527 __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) 5528 return -EPROTO; 5529 5530 return 0; 5531 } 5532 5533 static void ath10k_wmi_event_service_ready_work(struct work_struct *work) 5534 { 5535 struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work); 5536 struct sk_buff *skb = ar->svc_rdy_skb; 5537 struct wmi_svc_rdy_ev_arg arg = {}; 5538 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; 5539 int ret; 5540 bool allocated; 5541 5542 if (!skb) { 5543 ath10k_warn(ar, "invalid service ready event skb\n"); 5544 return; 5545 } 5546 5547 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg); 5548 if (ret) { 5549 ath10k_warn(ar, "failed to parse service ready: %d\n", ret); 5550 return; 5551 } 5552 5553 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map, 5554 arg.service_map_len); 5555 5556 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); 5557 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); 5558 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); 5559 ar->vht_cap_info = __le32_to_cpu(arg.vht_cap); 5560 ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs); 5561 ar->fw_version_major = 5562 (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24; 5563 ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff); 5564 ar->fw_version_release = 5565 (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16; 5566 ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff); 5567 ar->phy_capability = __le32_to_cpu(arg.phy_capab); 5568 ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); 5569 ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd); 5570 ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan); 5571 ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan); 5572 ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan); 5573 ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan); 5574 ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info); 5575 5576 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", 5577 arg.service_map, arg.service_map_len); 5578 ath10k_dbg(ar, 
ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n", 5579 ar->sys_cap_info); 5580 5581 if (ar->num_rf_chains > ar->max_spatial_stream) { 5582 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", 5583 ar->num_rf_chains, ar->max_spatial_stream); 5584 ar->num_rf_chains = ar->max_spatial_stream; 5585 } 5586 5587 if (!ar->cfg_tx_chainmask) { 5588 ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1; 5589 ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1; 5590 } 5591 5592 if (strlen(ar->hw->wiphy->fw_version) == 0) { 5593 snprintf(ar->hw->wiphy->fw_version, 5594 sizeof(ar->hw->wiphy->fw_version), 5595 "%u.%u.%u.%u", 5596 ar->fw_version_major, 5597 ar->fw_version_minor, 5598 ar->fw_version_release, 5599 ar->fw_version_build); 5600 } 5601 5602 num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs); 5603 if (num_mem_reqs > WMI_MAX_MEM_REQS) { 5604 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", 5605 num_mem_reqs); 5606 return; 5607 } 5608 5609 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { 5610 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, 5611 ar->running_fw->fw_file.fw_features)) 5612 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC + 5613 ar->max_num_vdevs; 5614 else 5615 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + 5616 ar->max_num_vdevs; 5617 5618 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + 5619 ar->max_num_vdevs; 5620 ar->num_tids = ar->num_active_peers * 2; 5621 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; 5622 } 5623 5624 /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE 5625 * and WMI_SERVICE_IRAM_TIDS, etc. 5626 */ 5627 5628 allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs, 5629 num_mem_reqs); 5630 if (allocated) 5631 goto skip_mem_alloc; 5632 5633 /* Either this event is received during boot time or there is a change 5634 * in memory requirement from firmware when compared to last request. 5635 * Free any old memory and do a fresh allocation based on the current 5636 * memory requirement. 
5637 */ 5638 ath10k_wmi_free_host_mem(ar); 5639 5640 for (i = 0; i < num_mem_reqs; ++i) { 5641 req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); 5642 num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); 5643 unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); 5644 num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); 5645 5646 if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { 5647 if (ar->num_active_peers) 5648 num_units = ar->num_active_peers + 1; 5649 else 5650 num_units = ar->max_num_peers + 1; 5651 } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { 5652 /* number of units to allocate is number of 5653 * peers, 1 extra for self peer on target 5654 * this needs to be tied, host and target 5655 * can get out of sync 5656 */ 5657 num_units = ar->max_num_peers + 1; 5658 } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { 5659 num_units = ar->max_num_vdevs + 1; 5660 } 5661 5662 ath10k_dbg(ar, ATH10K_DBG_WMI, 5663 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", 5664 req_id, 5665 __le32_to_cpu(arg.mem_reqs[i]->num_units), 5666 num_unit_info, 5667 unit_size, 5668 num_units); 5669 5670 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, 5671 unit_size); 5672 if (ret) 5673 return; 5674 } 5675 5676 skip_mem_alloc: 5677 ath10k_dbg(ar, ATH10K_DBG_WMI, 5678 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n", 5679 __le32_to_cpu(arg.min_tx_power), 5680 __le32_to_cpu(arg.max_tx_power), 5681 __le32_to_cpu(arg.ht_cap), 5682 __le32_to_cpu(arg.vht_cap), 5683 __le32_to_cpu(arg.vht_supp_mcs), 5684 __le32_to_cpu(arg.sw_ver0), 5685 __le32_to_cpu(arg.sw_ver1), 5686 __le32_to_cpu(arg.fw_build), 5687 __le32_to_cpu(arg.phy_capab), 5688 __le32_to_cpu(arg.num_rf_chains), 5689 __le32_to_cpu(arg.eeprom_rd), 5690 __le32_to_cpu(arg.low_2ghz_chan), 5691 __le32_to_cpu(arg.high_2ghz_chan), 5692 __le32_to_cpu(arg.low_5ghz_chan), 5693 __le32_to_cpu(arg.high_5ghz_chan), 5694 __le32_to_cpu(arg.num_mem_reqs)); 5695 5696 dev_kfree_skb(skb); 5697 ar->svc_rdy_skb = NULL; 5698 complete(&ar->wmi.service_ready); 5699 } 5700 5701 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) 5702 { 5703 ar->svc_rdy_skb = skb; 5704 queue_work(ar->workqueue_aux, &ar->svc_rdy_work); 5705 } 5706 5707 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb, 5708 struct wmi_rdy_ev_arg *arg) 5709 { 5710 struct wmi_ready_event *ev = (void *)skb->data; 5711 5712 if (skb->len < sizeof(*ev)) 5713 return -EPROTO; 5714 5715 skb_pull(skb, sizeof(*ev)); 5716 arg->sw_version = ev->sw_version; 5717 arg->abi_version = ev->abi_version; 5718 arg->status = ev->status; 5719 arg->mac_addr = ev->mac_addr.addr; 5720 5721 return 0; 5722 } 5723 5724 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb, 5725 struct wmi_roam_ev_arg *arg) 5726 { 5727 struct wmi_roam_ev *ev = (void *)skb->data; 5728 5729 if (skb->len < sizeof(*ev)) 5730 return -EPROTO; 5731 5732 skb_pull(skb, sizeof(*ev)); 5733 arg->vdev_id = ev->vdev_id; 5734 arg->reason = ev->reason; 5735 5736 return 0; 5737 } 5738 5739 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar, 5740 struct sk_buff *skb, 5741 struct wmi_echo_ev_arg *arg) 5742 { 5743 struct wmi_echo_event *ev = (void *)skb->data; 5744 5745 arg->value = ev->value; 5746 
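	/* Note: the echo payload is a single fixed-size value and, unlike
	 * the other pull helpers in this file, is copied without an
	 * explicit skb length check.
	 */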
5747 return 0; 5748 } 5749 5750 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) 5751 { 5752 struct wmi_rdy_ev_arg arg = {}; 5753 int ret; 5754 5755 ret = ath10k_wmi_pull_rdy(ar, skb, &arg); 5756 if (ret) { 5757 ath10k_warn(ar, "failed to parse ready event: %d\n", ret); 5758 return ret; 5759 } 5760 5761 ath10k_dbg(ar, ATH10K_DBG_WMI, 5762 "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n", 5763 __le32_to_cpu(arg.sw_version), 5764 __le32_to_cpu(arg.abi_version), 5765 arg.mac_addr, 5766 __le32_to_cpu(arg.status)); 5767 5768 if (is_zero_ether_addr(ar->mac_addr)) 5769 ether_addr_copy(ar->mac_addr, arg.mac_addr); 5770 complete(&ar->wmi.unified_ready); 5771 return 0; 5772 } 5773 5774 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) 5775 { 5776 int ret; 5777 struct wmi_svc_avail_ev_arg arg = {}; 5778 5779 ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg); 5780 if (ret) { 5781 ath10k_warn(ar, "failed to parse service available event: %d\n", 5782 ret); 5783 } 5784 5785 /* 5786 * Initialization of "arg.service_map_ext_valid" to ZERO is necessary 5787 * for the below logic to work. 5788 */ 5789 if (arg.service_map_ext_valid) 5790 ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, 5791 __le32_to_cpu(arg.service_map_ext_len)); 5792 } 5793 5794 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) 5795 { 5796 const struct wmi_pdev_temperature_event *ev; 5797 5798 ev = (struct wmi_pdev_temperature_event *)skb->data; 5799 if (WARN_ON(skb->len < sizeof(*ev))) 5800 return -EPROTO; 5801 5802 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); 5803 return 0; 5804 } 5805 5806 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar, 5807 struct sk_buff *skb) 5808 { 5809 struct wmi_pdev_bss_chan_info_event *ev; 5810 struct survey_info *survey; 5811 u64 busy, total, tx, rx, rx_bss; 5812 u32 freq, noise_floor; 5813 u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz; 5814 int idx; 5815 5816 ev = (struct wmi_pdev_bss_chan_info_event *)skb->data; 5817 if (WARN_ON(skb->len < sizeof(*ev))) 5818 return -EPROTO; 5819 5820 freq = __le32_to_cpu(ev->freq); 5821 noise_floor = __le32_to_cpu(ev->noise_floor); 5822 busy = __le64_to_cpu(ev->cycle_busy); 5823 total = __le64_to_cpu(ev->cycle_total); 5824 tx = __le64_to_cpu(ev->cycle_tx); 5825 rx = __le64_to_cpu(ev->cycle_rx); 5826 rx_bss = __le64_to_cpu(ev->cycle_rx_bss); 5827 5828 ath10k_dbg(ar, ATH10K_DBG_WMI, 5829 "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 5830 freq, noise_floor, busy, total, tx, rx, rx_bss); 5831 5832 spin_lock_bh(&ar->data_lock); 5833 idx = freq_to_idx(ar, freq); 5834 if (idx >= ARRAY_SIZE(ar->survey)) { 5835 ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 5836 freq, idx); 5837 goto exit; 5838 } 5839 5840 survey = &ar->survey[idx]; 5841 5842 survey->noise = noise_floor; 5843 survey->time = div_u64(total, cc_freq_hz); 5844 survey->time_busy = div_u64(busy, cc_freq_hz); 5845 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 5846 survey->time_tx = div_u64(tx, cc_freq_hz); 5847 survey->filled |= (SURVEY_INFO_NOISE_DBM | 5848 SURVEY_INFO_TIME | 5849 SURVEY_INFO_TIME_BUSY | 5850 SURVEY_INFO_TIME_RX | 5851 SURVEY_INFO_TIME_TX); 5852 exit: 5853 spin_unlock_bh(&ar->data_lock); 5854 complete(&ar->bss_survey_done); 5855 return 0; 5856 } 5857 5858 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar) 5859 { 
5860 if (ar->hw_params.hw_ops->set_coverage_class) { 5861 spin_lock_bh(&ar->data_lock); 5862 5863 /* This call only ensures that the modified coverage class 5864 * persists in case the firmware sets the registers back to 5865 * their default value. So calling it is only necessary if the 5866 * coverage class has a non-zero value. 5867 */ 5868 if (ar->fw_coverage.coverage_class) 5869 queue_work(ar->workqueue, &ar->set_coverage_class_work); 5870 5871 spin_unlock_bh(&ar->data_lock); 5872 } 5873 } 5874 5875 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) 5876 { 5877 struct wmi_cmd_hdr *cmd_hdr; 5878 enum wmi_event_id id; 5879 5880 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 5881 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 5882 5883 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 5884 goto out; 5885 5886 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); 5887 5888 switch (id) { 5889 case WMI_MGMT_RX_EVENTID: 5890 ath10k_wmi_event_mgmt_rx(ar, skb); 5891 /* mgmt_rx() owns the skb now! */ 5892 return; 5893 case WMI_SCAN_EVENTID: 5894 ath10k_wmi_event_scan(ar, skb); 5895 ath10k_wmi_queue_set_coverage_class_work(ar); 5896 break; 5897 case WMI_CHAN_INFO_EVENTID: 5898 ath10k_wmi_event_chan_info(ar, skb); 5899 break; 5900 case WMI_ECHO_EVENTID: 5901 ath10k_wmi_event_echo(ar, skb); 5902 break; 5903 case WMI_DEBUG_MESG_EVENTID: 5904 ath10k_wmi_event_debug_mesg(ar, skb); 5905 ath10k_wmi_queue_set_coverage_class_work(ar); 5906 break; 5907 case WMI_UPDATE_STATS_EVENTID: 5908 ath10k_wmi_event_update_stats(ar, skb); 5909 break; 5910 case WMI_VDEV_START_RESP_EVENTID: 5911 ath10k_wmi_event_vdev_start_resp(ar, skb); 5912 ath10k_wmi_queue_set_coverage_class_work(ar); 5913 break; 5914 case WMI_VDEV_STOPPED_EVENTID: 5915 ath10k_wmi_event_vdev_stopped(ar, skb); 5916 ath10k_wmi_queue_set_coverage_class_work(ar); 5917 break; 5918 case WMI_PEER_STA_KICKOUT_EVENTID: 5919 ath10k_wmi_event_peer_sta_kickout(ar, skb); 5920 break; 5921 case WMI_HOST_SWBA_EVENTID: 5922 ath10k_wmi_event_host_swba(ar, skb); 5923 break; 5924 case WMI_TBTTOFFSET_UPDATE_EVENTID: 5925 ath10k_wmi_event_tbttoffset_update(ar, skb); 5926 break; 5927 case WMI_PHYERR_EVENTID: 5928 ath10k_wmi_event_phyerr(ar, skb); 5929 break; 5930 case WMI_ROAM_EVENTID: 5931 ath10k_wmi_event_roam(ar, skb); 5932 ath10k_wmi_queue_set_coverage_class_work(ar); 5933 break; 5934 case WMI_PROFILE_MATCH: 5935 ath10k_wmi_event_profile_match(ar, skb); 5936 break; 5937 case WMI_DEBUG_PRINT_EVENTID: 5938 ath10k_wmi_event_debug_print(ar, skb); 5939 ath10k_wmi_queue_set_coverage_class_work(ar); 5940 break; 5941 case WMI_PDEV_QVIT_EVENTID: 5942 ath10k_wmi_event_pdev_qvit(ar, skb); 5943 break; 5944 case WMI_WLAN_PROFILE_DATA_EVENTID: 5945 ath10k_wmi_event_wlan_profile_data(ar, skb); 5946 break; 5947 case WMI_RTT_MEASUREMENT_REPORT_EVENTID: 5948 ath10k_wmi_event_rtt_measurement_report(ar, skb); 5949 break; 5950 case WMI_TSF_MEASUREMENT_REPORT_EVENTID: 5951 ath10k_wmi_event_tsf_measurement_report(ar, skb); 5952 break; 5953 case WMI_RTT_ERROR_REPORT_EVENTID: 5954 ath10k_wmi_event_rtt_error_report(ar, skb); 5955 break; 5956 case WMI_WOW_WAKEUP_HOST_EVENTID: 5957 ath10k_wmi_event_wow_wakeup_host(ar, skb); 5958 break; 5959 case WMI_DCS_INTERFERENCE_EVENTID: 5960 ath10k_wmi_event_dcs_interference(ar, skb); 5961 break; 5962 case WMI_PDEV_TPC_CONFIG_EVENTID: 5963 ath10k_wmi_event_pdev_tpc_config(ar, skb); 5964 break; 5965 case WMI_PDEV_FTM_INTG_EVENTID: 5966 ath10k_wmi_event_pdev_ftm_intg(ar, skb); 5967 break; 5968 case 
WMI_GTK_OFFLOAD_STATUS_EVENTID: 5969 ath10k_wmi_event_gtk_offload_status(ar, skb); 5970 break; 5971 case WMI_GTK_REKEY_FAIL_EVENTID: 5972 ath10k_wmi_event_gtk_rekey_fail(ar, skb); 5973 break; 5974 case WMI_TX_DELBA_COMPLETE_EVENTID: 5975 ath10k_wmi_event_delba_complete(ar, skb); 5976 break; 5977 case WMI_TX_ADDBA_COMPLETE_EVENTID: 5978 ath10k_wmi_event_addba_complete(ar, skb); 5979 break; 5980 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 5981 ath10k_wmi_event_vdev_install_key_complete(ar, skb); 5982 break; 5983 case WMI_SERVICE_READY_EVENTID: 5984 ath10k_wmi_event_service_ready(ar, skb); 5985 return; 5986 case WMI_READY_EVENTID: 5987 ath10k_wmi_event_ready(ar, skb); 5988 ath10k_wmi_queue_set_coverage_class_work(ar); 5989 break; 5990 case WMI_SERVICE_AVAILABLE_EVENTID: 5991 ath10k_wmi_event_service_available(ar, skb); 5992 break; 5993 default: 5994 ath10k_warn(ar, "Unknown eventid: %d\n", id); 5995 break; 5996 } 5997 5998 out: 5999 dev_kfree_skb(skb); 6000 } 6001 6002 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb) 6003 { 6004 struct wmi_cmd_hdr *cmd_hdr; 6005 enum wmi_10x_event_id id; 6006 bool consumed; 6007 6008 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 6009 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 6010 6011 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 6012 goto out; 6013 6014 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); 6015 6016 consumed = ath10k_tm_event_wmi(ar, id, skb); 6017 6018 /* Ready event must be handled normally also in UTF mode so that we 6019 * know the UTF firmware has booted, others we are just bypass WMI 6020 * events to testmode. 6021 */ 6022 if (consumed && id != WMI_10X_READY_EVENTID) { 6023 ath10k_dbg(ar, ATH10K_DBG_WMI, 6024 "wmi testmode consumed 0x%x\n", id); 6025 goto out; 6026 } 6027 6028 switch (id) { 6029 case WMI_10X_MGMT_RX_EVENTID: 6030 ath10k_wmi_event_mgmt_rx(ar, skb); 6031 /* mgmt_rx() owns the skb now! 
*/ 6032 return; 6033 case WMI_10X_SCAN_EVENTID: 6034 ath10k_wmi_event_scan(ar, skb); 6035 ath10k_wmi_queue_set_coverage_class_work(ar); 6036 break; 6037 case WMI_10X_CHAN_INFO_EVENTID: 6038 ath10k_wmi_event_chan_info(ar, skb); 6039 break; 6040 case WMI_10X_ECHO_EVENTID: 6041 ath10k_wmi_event_echo(ar, skb); 6042 break; 6043 case WMI_10X_DEBUG_MESG_EVENTID: 6044 ath10k_wmi_event_debug_mesg(ar, skb); 6045 ath10k_wmi_queue_set_coverage_class_work(ar); 6046 break; 6047 case WMI_10X_UPDATE_STATS_EVENTID: 6048 ath10k_wmi_event_update_stats(ar, skb); 6049 break; 6050 case WMI_10X_VDEV_START_RESP_EVENTID: 6051 ath10k_wmi_event_vdev_start_resp(ar, skb); 6052 ath10k_wmi_queue_set_coverage_class_work(ar); 6053 break; 6054 case WMI_10X_VDEV_STOPPED_EVENTID: 6055 ath10k_wmi_event_vdev_stopped(ar, skb); 6056 ath10k_wmi_queue_set_coverage_class_work(ar); 6057 break; 6058 case WMI_10X_PEER_STA_KICKOUT_EVENTID: 6059 ath10k_wmi_event_peer_sta_kickout(ar, skb); 6060 break; 6061 case WMI_10X_HOST_SWBA_EVENTID: 6062 ath10k_wmi_event_host_swba(ar, skb); 6063 break; 6064 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: 6065 ath10k_wmi_event_tbttoffset_update(ar, skb); 6066 break; 6067 case WMI_10X_PHYERR_EVENTID: 6068 ath10k_wmi_event_phyerr(ar, skb); 6069 break; 6070 case WMI_10X_ROAM_EVENTID: 6071 ath10k_wmi_event_roam(ar, skb); 6072 ath10k_wmi_queue_set_coverage_class_work(ar); 6073 break; 6074 case WMI_10X_PROFILE_MATCH: 6075 ath10k_wmi_event_profile_match(ar, skb); 6076 break; 6077 case WMI_10X_DEBUG_PRINT_EVENTID: 6078 ath10k_wmi_event_debug_print(ar, skb); 6079 ath10k_wmi_queue_set_coverage_class_work(ar); 6080 break; 6081 case WMI_10X_PDEV_QVIT_EVENTID: 6082 ath10k_wmi_event_pdev_qvit(ar, skb); 6083 break; 6084 case WMI_10X_WLAN_PROFILE_DATA_EVENTID: 6085 ath10k_wmi_event_wlan_profile_data(ar, skb); 6086 break; 6087 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: 6088 ath10k_wmi_event_rtt_measurement_report(ar, skb); 6089 break; 6090 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: 6091 ath10k_wmi_event_tsf_measurement_report(ar, skb); 6092 break; 6093 case WMI_10X_RTT_ERROR_REPORT_EVENTID: 6094 ath10k_wmi_event_rtt_error_report(ar, skb); 6095 break; 6096 case WMI_10X_WOW_WAKEUP_HOST_EVENTID: 6097 ath10k_wmi_event_wow_wakeup_host(ar, skb); 6098 break; 6099 case WMI_10X_DCS_INTERFERENCE_EVENTID: 6100 ath10k_wmi_event_dcs_interference(ar, skb); 6101 break; 6102 case WMI_10X_PDEV_TPC_CONFIG_EVENTID: 6103 ath10k_wmi_event_pdev_tpc_config(ar, skb); 6104 break; 6105 case WMI_10X_INST_RSSI_STATS_EVENTID: 6106 ath10k_wmi_event_inst_rssi_stats(ar, skb); 6107 break; 6108 case WMI_10X_VDEV_STANDBY_REQ_EVENTID: 6109 ath10k_wmi_event_vdev_standby_req(ar, skb); 6110 break; 6111 case WMI_10X_VDEV_RESUME_REQ_EVENTID: 6112 ath10k_wmi_event_vdev_resume_req(ar, skb); 6113 break; 6114 case WMI_10X_SERVICE_READY_EVENTID: 6115 ath10k_wmi_event_service_ready(ar, skb); 6116 return; 6117 case WMI_10X_READY_EVENTID: 6118 ath10k_wmi_event_ready(ar, skb); 6119 ath10k_wmi_queue_set_coverage_class_work(ar); 6120 break; 6121 case WMI_10X_PDEV_UTF_EVENTID: 6122 /* ignore utf events */ 6123 break; 6124 default: 6125 ath10k_warn(ar, "Unknown eventid: %d\n", id); 6126 break; 6127 } 6128 6129 out: 6130 dev_kfree_skb(skb); 6131 } 6132 6133 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) 6134 { 6135 struct wmi_cmd_hdr *cmd_hdr; 6136 enum wmi_10_2_event_id id; 6137 bool consumed; 6138 6139 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 6140 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 6141 6142 if (skb_pull(skb, 
sizeof(struct wmi_cmd_hdr)) == NULL) 6143 goto out; 6144 6145 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); 6146 6147 consumed = ath10k_tm_event_wmi(ar, id, skb); 6148 6149 /* Ready event must be handled normally also in UTF mode so that we 6150 * know the UTF firmware has booted, others we are just bypass WMI 6151 * events to testmode. 6152 */ 6153 if (consumed && id != WMI_10_2_READY_EVENTID) { 6154 ath10k_dbg(ar, ATH10K_DBG_WMI, 6155 "wmi testmode consumed 0x%x\n", id); 6156 goto out; 6157 } 6158 6159 switch (id) { 6160 case WMI_10_2_MGMT_RX_EVENTID: 6161 ath10k_wmi_event_mgmt_rx(ar, skb); 6162 /* mgmt_rx() owns the skb now! */ 6163 return; 6164 case WMI_10_2_SCAN_EVENTID: 6165 ath10k_wmi_event_scan(ar, skb); 6166 ath10k_wmi_queue_set_coverage_class_work(ar); 6167 break; 6168 case WMI_10_2_CHAN_INFO_EVENTID: 6169 ath10k_wmi_event_chan_info(ar, skb); 6170 break; 6171 case WMI_10_2_ECHO_EVENTID: 6172 ath10k_wmi_event_echo(ar, skb); 6173 break; 6174 case WMI_10_2_DEBUG_MESG_EVENTID: 6175 ath10k_wmi_event_debug_mesg(ar, skb); 6176 ath10k_wmi_queue_set_coverage_class_work(ar); 6177 break; 6178 case WMI_10_2_UPDATE_STATS_EVENTID: 6179 ath10k_wmi_event_update_stats(ar, skb); 6180 break; 6181 case WMI_10_2_VDEV_START_RESP_EVENTID: 6182 ath10k_wmi_event_vdev_start_resp(ar, skb); 6183 ath10k_wmi_queue_set_coverage_class_work(ar); 6184 break; 6185 case WMI_10_2_VDEV_STOPPED_EVENTID: 6186 ath10k_wmi_event_vdev_stopped(ar, skb); 6187 ath10k_wmi_queue_set_coverage_class_work(ar); 6188 break; 6189 case WMI_10_2_PEER_STA_KICKOUT_EVENTID: 6190 ath10k_wmi_event_peer_sta_kickout(ar, skb); 6191 break; 6192 case WMI_10_2_HOST_SWBA_EVENTID: 6193 ath10k_wmi_event_host_swba(ar, skb); 6194 break; 6195 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID: 6196 ath10k_wmi_event_tbttoffset_update(ar, skb); 6197 break; 6198 case WMI_10_2_PHYERR_EVENTID: 6199 ath10k_wmi_event_phyerr(ar, skb); 6200 break; 6201 case WMI_10_2_ROAM_EVENTID: 6202 ath10k_wmi_event_roam(ar, skb); 6203 ath10k_wmi_queue_set_coverage_class_work(ar); 6204 break; 6205 case WMI_10_2_PROFILE_MATCH: 6206 ath10k_wmi_event_profile_match(ar, skb); 6207 break; 6208 case WMI_10_2_DEBUG_PRINT_EVENTID: 6209 ath10k_wmi_event_debug_print(ar, skb); 6210 ath10k_wmi_queue_set_coverage_class_work(ar); 6211 break; 6212 case WMI_10_2_PDEV_QVIT_EVENTID: 6213 ath10k_wmi_event_pdev_qvit(ar, skb); 6214 break; 6215 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID: 6216 ath10k_wmi_event_wlan_profile_data(ar, skb); 6217 break; 6218 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID: 6219 ath10k_wmi_event_rtt_measurement_report(ar, skb); 6220 break; 6221 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID: 6222 ath10k_wmi_event_tsf_measurement_report(ar, skb); 6223 break; 6224 case WMI_10_2_RTT_ERROR_REPORT_EVENTID: 6225 ath10k_wmi_event_rtt_error_report(ar, skb); 6226 break; 6227 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID: 6228 ath10k_wmi_event_wow_wakeup_host(ar, skb); 6229 break; 6230 case WMI_10_2_DCS_INTERFERENCE_EVENTID: 6231 ath10k_wmi_event_dcs_interference(ar, skb); 6232 break; 6233 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID: 6234 ath10k_wmi_event_pdev_tpc_config(ar, skb); 6235 break; 6236 case WMI_10_2_INST_RSSI_STATS_EVENTID: 6237 ath10k_wmi_event_inst_rssi_stats(ar, skb); 6238 break; 6239 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID: 6240 ath10k_wmi_event_vdev_standby_req(ar, skb); 6241 ath10k_wmi_queue_set_coverage_class_work(ar); 6242 break; 6243 case WMI_10_2_VDEV_RESUME_REQ_EVENTID: 6244 ath10k_wmi_event_vdev_resume_req(ar, skb); 6245 ath10k_wmi_queue_set_coverage_class_work(ar); 6246 break; 6247 
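	/* The service ready skb is handed off to the deferred service
	 * ready worker and must not be freed here, hence the early
	 * return instead of a break.
	 */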
case WMI_10_2_SERVICE_READY_EVENTID: 6248 ath10k_wmi_event_service_ready(ar, skb); 6249 return; 6250 case WMI_10_2_READY_EVENTID: 6251 ath10k_wmi_event_ready(ar, skb); 6252 ath10k_wmi_queue_set_coverage_class_work(ar); 6253 break; 6254 case WMI_10_2_PDEV_TEMPERATURE_EVENTID: 6255 ath10k_wmi_event_temperature(ar, skb); 6256 break; 6257 case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID: 6258 ath10k_wmi_event_pdev_bss_chan_info(ar, skb); 6259 break; 6260 case WMI_10_2_RTT_KEEPALIVE_EVENTID: 6261 case WMI_10_2_GPIO_INPUT_EVENTID: 6262 case WMI_10_2_PEER_RATECODE_LIST_EVENTID: 6263 case WMI_10_2_GENERIC_BUFFER_EVENTID: 6264 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: 6265 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: 6266 case WMI_10_2_WDS_PEER_EVENTID: 6267 ath10k_dbg(ar, ATH10K_DBG_WMI, 6268 "received event id %d not implemented\n", id); 6269 break; 6270 case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID: 6271 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); 6272 break; 6273 default: 6274 ath10k_warn(ar, "Unknown eventid: %d\n", id); 6275 break; 6276 } 6277 6278 out: 6279 dev_kfree_skb(skb); 6280 } 6281 6282 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) 6283 { 6284 struct wmi_cmd_hdr *cmd_hdr; 6285 enum wmi_10_4_event_id id; 6286 bool consumed; 6287 6288 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 6289 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 6290 6291 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) 6292 goto out; 6293 6294 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); 6295 6296 consumed = ath10k_tm_event_wmi(ar, id, skb); 6297 6298 /* Ready event must be handled normally also in UTF mode so that we 6299 * know the UTF firmware has booted, others we are just bypass WMI 6300 * events to testmode. 6301 */ 6302 if (consumed && id != WMI_10_4_READY_EVENTID) { 6303 ath10k_dbg(ar, ATH10K_DBG_WMI, 6304 "wmi testmode consumed 0x%x\n", id); 6305 goto out; 6306 } 6307 6308 switch (id) { 6309 case WMI_10_4_MGMT_RX_EVENTID: 6310 ath10k_wmi_event_mgmt_rx(ar, skb); 6311 /* mgmt_rx() owns the skb now! 
*/ 6312 return; 6313 case WMI_10_4_ECHO_EVENTID: 6314 ath10k_wmi_event_echo(ar, skb); 6315 break; 6316 case WMI_10_4_DEBUG_MESG_EVENTID: 6317 ath10k_wmi_event_debug_mesg(ar, skb); 6318 ath10k_wmi_queue_set_coverage_class_work(ar); 6319 break; 6320 case WMI_10_4_SERVICE_READY_EVENTID: 6321 ath10k_wmi_event_service_ready(ar, skb); 6322 return; 6323 case WMI_10_4_SCAN_EVENTID: 6324 ath10k_wmi_event_scan(ar, skb); 6325 ath10k_wmi_queue_set_coverage_class_work(ar); 6326 break; 6327 case WMI_10_4_CHAN_INFO_EVENTID: 6328 ath10k_wmi_event_chan_info(ar, skb); 6329 break; 6330 case WMI_10_4_PHYERR_EVENTID: 6331 ath10k_wmi_event_phyerr(ar, skb); 6332 break; 6333 case WMI_10_4_READY_EVENTID: 6334 ath10k_wmi_event_ready(ar, skb); 6335 ath10k_wmi_queue_set_coverage_class_work(ar); 6336 break; 6337 case WMI_10_4_PEER_STA_KICKOUT_EVENTID: 6338 ath10k_wmi_event_peer_sta_kickout(ar, skb); 6339 break; 6340 case WMI_10_4_ROAM_EVENTID: 6341 ath10k_wmi_event_roam(ar, skb); 6342 ath10k_wmi_queue_set_coverage_class_work(ar); 6343 break; 6344 case WMI_10_4_HOST_SWBA_EVENTID: 6345 ath10k_wmi_event_host_swba(ar, skb); 6346 break; 6347 case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID: 6348 ath10k_wmi_event_tbttoffset_update(ar, skb); 6349 break; 6350 case WMI_10_4_DEBUG_PRINT_EVENTID: 6351 ath10k_wmi_event_debug_print(ar, skb); 6352 ath10k_wmi_queue_set_coverage_class_work(ar); 6353 break; 6354 case WMI_10_4_VDEV_START_RESP_EVENTID: 6355 ath10k_wmi_event_vdev_start_resp(ar, skb); 6356 ath10k_wmi_queue_set_coverage_class_work(ar); 6357 break; 6358 case WMI_10_4_VDEV_STOPPED_EVENTID: 6359 ath10k_wmi_event_vdev_stopped(ar, skb); 6360 ath10k_wmi_queue_set_coverage_class_work(ar); 6361 break; 6362 case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: 6363 case WMI_10_4_PEER_RATECODE_LIST_EVENTID: 6364 case WMI_10_4_WDS_PEER_EVENTID: 6365 case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID: 6366 ath10k_dbg(ar, ATH10K_DBG_WMI, 6367 "received event id %d not implemented\n", id); 6368 break; 6369 case WMI_10_4_UPDATE_STATS_EVENTID: 6370 ath10k_wmi_event_update_stats(ar, skb); 6371 break; 6372 case WMI_10_4_PDEV_TEMPERATURE_EVENTID: 6373 ath10k_wmi_event_temperature(ar, skb); 6374 break; 6375 case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID: 6376 ath10k_wmi_event_pdev_bss_chan_info(ar, skb); 6377 break; 6378 case WMI_10_4_PDEV_TPC_CONFIG_EVENTID: 6379 ath10k_wmi_event_pdev_tpc_config(ar, skb); 6380 break; 6381 case WMI_10_4_TDLS_PEER_EVENTID: 6382 ath10k_wmi_handle_tdls_peer_event(ar, skb); 6383 break; 6384 case WMI_10_4_PDEV_TPC_TABLE_EVENTID: 6385 ath10k_wmi_event_tpc_final_table(ar, skb); 6386 break; 6387 case WMI_10_4_DFS_STATUS_CHECK_EVENTID: 6388 ath10k_wmi_event_dfs_status_check(ar, skb); 6389 break; 6390 case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID: 6391 ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb); 6392 break; 6393 default: 6394 ath10k_warn(ar, "Unknown eventid: %d\n", id); 6395 break; 6396 } 6397 6398 out: 6399 dev_kfree_skb(skb); 6400 } 6401 6402 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 6403 { 6404 int ret; 6405 6406 ret = ath10k_wmi_rx(ar, skb); 6407 if (ret) 6408 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret); 6409 } 6410 6411 int ath10k_wmi_connect(struct ath10k *ar) 6412 { 6413 int status; 6414 struct ath10k_htc_svc_conn_req conn_req; 6415 struct ath10k_htc_svc_conn_resp conn_resp; 6416 6417 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); 6418 6419 memset(&conn_req, 0, sizeof(conn_req)); 6420 memset(&conn_resp, 0, sizeof(conn_resp)); 6421 6422 /* these fields are the same for all service endpoints */ 
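	/*
	 * All WMI traffic rides on a single HTC endpoint: ep_rx_complete
	 * feeds every received event into ath10k_wmi_process_rx() above,
	 * ep_tx_complete releases command buffers once HTC has handed them
	 * to the target, and ep_tx_credits wakes any command submitter that
	 * is waiting for tx credits to be replenished.
	 */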
6423 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; 6424 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; 6425 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits; 6426 6427 /* connect to control service */ 6428 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; 6429 6430 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); 6431 if (status) { 6432 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n", 6433 status); 6434 return status; 6435 } 6436 6437 ar->wmi.eid = conn_resp.eid; 6438 return 0; 6439 } 6440 6441 static struct sk_buff * 6442 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar, 6443 const u8 macaddr[ETH_ALEN]) 6444 { 6445 struct wmi_pdev_set_base_macaddr_cmd *cmd; 6446 struct sk_buff *skb; 6447 6448 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 6449 if (!skb) 6450 return ERR_PTR(-ENOMEM); 6451 6452 cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data; 6453 ether_addr_copy(cmd->mac_addr.addr, macaddr); 6454 6455 ath10k_dbg(ar, ATH10K_DBG_WMI, 6456 "wmi pdev basemac %pM\n", macaddr); 6457 return skb; 6458 } 6459 6460 static struct sk_buff * 6461 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, 6462 u16 ctl2g, u16 ctl5g, 6463 enum wmi_dfs_region dfs_reg) 6464 { 6465 struct wmi_pdev_set_regdomain_cmd *cmd; 6466 struct sk_buff *skb; 6467 6468 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 6469 if (!skb) 6470 return ERR_PTR(-ENOMEM); 6471 6472 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 6473 cmd->reg_domain = __cpu_to_le32(rd); 6474 cmd->reg_domain_2G = __cpu_to_le32(rd2g); 6475 cmd->reg_domain_5G = __cpu_to_le32(rd5g); 6476 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 6477 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 6478 6479 ath10k_dbg(ar, ATH10K_DBG_WMI, 6480 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 6481 rd, rd2g, rd5g, ctl2g, ctl5g); 6482 return skb; 6483 } 6484 6485 static struct sk_buff * 6486 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 6487 rd5g, u16 ctl2g, u16 ctl5g, 6488 enum wmi_dfs_region dfs_reg) 6489 { 6490 struct wmi_pdev_set_regdomain_cmd_10x *cmd; 6491 struct sk_buff *skb; 6492 6493 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 6494 if (!skb) 6495 return ERR_PTR(-ENOMEM); 6496 6497 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data; 6498 cmd->reg_domain = __cpu_to_le32(rd); 6499 cmd->reg_domain_2G = __cpu_to_le32(rd2g); 6500 cmd->reg_domain_5G = __cpu_to_le32(rd5g); 6501 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 6502 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 6503 cmd->dfs_domain = __cpu_to_le32(dfs_reg); 6504 6505 ath10k_dbg(ar, ATH10K_DBG_WMI, 6506 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", 6507 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); 6508 return skb; 6509 } 6510 6511 static struct sk_buff * 6512 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt) 6513 { 6514 struct wmi_pdev_suspend_cmd *cmd; 6515 struct sk_buff *skb; 6516 6517 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 6518 if (!skb) 6519 return ERR_PTR(-ENOMEM); 6520 6521 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 6522 cmd->suspend_opt = __cpu_to_le32(suspend_opt); 6523 6524 return skb; 6525 } 6526 6527 static struct sk_buff * 6528 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar) 6529 { 6530 struct sk_buff *skb; 6531 6532 skb = ath10k_wmi_alloc_skb(ar, 0); 6533 if (!skb) 6534 return ERR_PTR(-ENOMEM); 6535 6536 return skb; 6537 } 6538 6539 
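/*
 * The ath10k_wmi_op_gen_*() helpers above and below only build the command
 * payload; they never transmit it themselves. The wrappers in wmi-ops.h pair
 * the generated skb with the ABI-specific command id from ar->wmi.cmd and
 * hand it to ath10k_wmi_cmd_send(). A simplified sketch of that pattern
 * (illustrative only, not the exact wmi-ops.h code):
 *
 *	static int example_wmi_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = ath10k_wmi_op_gen_pdev_suspend(ar, suspend_opt);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		return ath10k_wmi_cmd_send(ar, skb,
 *					   ar->wmi.cmd->pdev_suspend_cmdid);
 *	}
 */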
static struct sk_buff * 6540 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value) 6541 { 6542 struct wmi_pdev_set_param_cmd *cmd; 6543 struct sk_buff *skb; 6544 6545 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 6546 ath10k_warn(ar, "pdev param %d not supported by firmware\n", 6547 id); 6548 return ERR_PTR(-EOPNOTSUPP); 6549 } 6550 6551 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 6552 if (!skb) 6553 return ERR_PTR(-ENOMEM); 6554 6555 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 6556 cmd->param_id = __cpu_to_le32(id); 6557 cmd->param_value = __cpu_to_le32(value); 6558 6559 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 6560 id, value); 6561 return skb; 6562 } 6563 6564 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, 6565 struct wmi_host_mem_chunks *chunks) 6566 { 6567 struct host_memory_chunk *chunk; 6568 int i; 6569 6570 chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); 6571 6572 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 6573 chunk = &chunks->items[i]; 6574 chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 6575 chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); 6576 chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 6577 6578 ath10k_dbg(ar, ATH10K_DBG_WMI, 6579 "wmi chunk %d len %d requested, addr 0x%llx\n", 6580 i, 6581 ar->wmi.mem_chunks[i].len, 6582 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 6583 } 6584 } 6585 6586 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar) 6587 { 6588 struct wmi_init_cmd *cmd; 6589 struct sk_buff *buf; 6590 struct wmi_resource_config config = {}; 6591 u32 val; 6592 6593 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); 6594 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS); 6595 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); 6596 6597 config.num_offload_reorder_bufs = 6598 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS); 6599 6600 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS); 6601 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS); 6602 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT); 6603 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK); 6604 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK); 6605 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 6606 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 6607 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 6608 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); 6609 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); 6610 config.scan_max_pending_reqs = 6611 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); 6612 6613 config.bmiss_offload_max_vdev = 6614 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV); 6615 6616 config.roam_offload_max_vdev = 6617 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV); 6618 6619 config.roam_offload_max_ap_profiles = 6620 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES); 6621 6622 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS); 6623 config.num_mcast_table_elems = 6624 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS); 6625 6626 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE); 6627 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE); 6628 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES); 6629 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE); 6630 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM); 6631 6632 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 6633 
config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 6634 6635 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG); 6636 6637 config.gtk_offload_max_vdev = 6638 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV); 6639 6640 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); 6641 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); 6642 6643 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, 6644 ar->wmi.num_mem_chunks)); 6645 if (!buf) 6646 return ERR_PTR(-ENOMEM); 6647 6648 cmd = (struct wmi_init_cmd *)buf->data; 6649 6650 memcpy(&cmd->resource_config, &config, sizeof(config)); 6651 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 6652 6653 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); 6654 return buf; 6655 } 6656 6657 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar) 6658 { 6659 struct wmi_init_cmd_10x *cmd; 6660 struct sk_buff *buf; 6661 struct wmi_resource_config_10x config = {}; 6662 u32 val; 6663 6664 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 6665 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 6666 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 6667 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 6668 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 6669 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 6670 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); 6671 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6672 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6673 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6674 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); 6675 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); 6676 config.scan_max_pending_reqs = 6677 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); 6678 6679 config.bmiss_offload_max_vdev = 6680 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); 6681 6682 config.roam_offload_max_vdev = 6683 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); 6684 6685 config.roam_offload_max_ap_profiles = 6686 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); 6687 6688 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); 6689 config.num_mcast_table_elems = 6690 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); 6691 6692 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 6693 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 6694 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 6695 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); 6696 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 6697 6698 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 6699 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 6700 6701 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); 6702 6703 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); 6704 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); 6705 6706 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, 6707 ar->wmi.num_mem_chunks)); 6708 if (!buf) 6709 return ERR_PTR(-ENOMEM); 6710 6711 cmd = (struct wmi_init_cmd_10x *)buf->data; 6712 6713 memcpy(&cmd->resource_config, &config, sizeof(config)); 6714 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 6715 6716 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); 6717 return buf; 6718 } 6719 6720 static struct 
sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) 6721 { 6722 struct wmi_init_cmd_10_2 *cmd; 6723 struct sk_buff *buf; 6724 struct wmi_resource_config_10x config = {}; 6725 u32 val, features; 6726 6727 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 6728 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 6729 6730 if (ath10k_peer_stats_enabled(ar)) { 6731 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); 6732 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); 6733 } else { 6734 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 6735 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 6736 } 6737 6738 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 6739 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 6740 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); 6741 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6742 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6743 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 6744 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); 6745 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); 6746 6747 config.scan_max_pending_reqs = 6748 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); 6749 6750 config.bmiss_offload_max_vdev = 6751 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); 6752 6753 config.roam_offload_max_vdev = 6754 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); 6755 6756 config.roam_offload_max_ap_profiles = 6757 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); 6758 6759 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); 6760 config.num_mcast_table_elems = 6761 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); 6762 6763 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 6764 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 6765 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 6766 config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE); 6767 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 6768 6769 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 6770 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 6771 6772 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); 6773 6774 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); 6775 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); 6776 6777 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, 6778 ar->wmi.num_mem_chunks)); 6779 if (!buf) 6780 return ERR_PTR(-ENOMEM); 6781 6782 cmd = (struct wmi_init_cmd_10_2 *)buf->data; 6783 6784 features = WMI_10_2_RX_BATCH_MODE; 6785 6786 if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) && 6787 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) 6788 features |= WMI_10_2_COEX_GPIO; 6789 6790 if (ath10k_peer_stats_enabled(ar)) 6791 features |= WMI_10_2_PEER_STATS; 6792 6793 if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) 6794 features |= WMI_10_2_BSS_CHAN_INFO; 6795 6796 cmd->resource_config.feature_mask = __cpu_to_le32(features); 6797 6798 memcpy(&cmd->resource_config.common, &config, sizeof(config)); 6799 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 6800 6801 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); 6802 return buf; 6803 } 6804 6805 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) 6806 { 6807 struct 
wmi_init_cmd_10_4 *cmd; 6808 struct sk_buff *buf; 6809 struct wmi_resource_config_10_4 config = {}; 6810 6811 config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs); 6812 config.num_peers = __cpu_to_le32(ar->max_num_peers); 6813 config.num_active_peers = __cpu_to_le32(ar->num_active_peers); 6814 config.num_tids = __cpu_to_le32(ar->num_tids); 6815 6816 config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS); 6817 config.num_offload_reorder_buffs = 6818 __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); 6819 config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); 6820 config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); 6821 config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask); 6822 config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask); 6823 6824 config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); 6825 config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); 6826 config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); 6827 config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI); 6828 6829 config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); 6830 config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS); 6831 config.bmiss_offload_max_vdev = 6832 __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV); 6833 config.roam_offload_max_vdev = 6834 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV); 6835 config.roam_offload_max_ap_profiles = 6836 __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES); 6837 config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS); 6838 config.num_mcast_table_elems = 6839 __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS); 6840 6841 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE); 6842 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE); 6843 config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES); 6844 config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE); 6845 config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM); 6846 6847 config.rx_skip_defrag_timeout_dup_detection_check = 6848 __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK); 6849 6850 config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); 6851 config.gtk_offload_max_vdev = 6852 __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); 6853 config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); 6854 config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); 6855 config.max_peer_ext_stats = 6856 __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); 6857 config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP); 6858 6859 config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE); 6860 config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE); 6861 config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE); 6862 config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE); 6863 6864 config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE); 6865 config.tt_support = 6866 __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG); 6867 config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG); 6868 config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG); 6869 config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG); 6870 6871 buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items, 6872 ar->wmi.num_mem_chunks)); 6873 if (!buf) 6874 return ERR_PTR(-ENOMEM); 6875 6876 cmd = (struct wmi_init_cmd_10_4 *)buf->data; 6877 
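	/*
	 * Layout of the INIT command: a fixed-size wmi_resource_config_10_4
	 * followed by a variable-length list of host memory chunks, which is
	 * why the skb above is sized with struct_size() over
	 * ar->wmi.num_mem_chunks and finished off below with
	 * ath10k_wmi_put_host_mem_chunks().
	 */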
memcpy(&cmd->resource_config, &config, sizeof(config)); 6878 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 6879 6880 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n"); 6881 return buf; 6882 } 6883 6884 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) 6885 { 6886 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) 6887 return -EINVAL; 6888 if (arg->n_channels > ARRAY_SIZE(arg->channels)) 6889 return -EINVAL; 6890 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) 6891 return -EINVAL; 6892 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) 6893 return -EINVAL; 6894 6895 return 0; 6896 } 6897 6898 static size_t 6899 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg) 6900 { 6901 int len = 0; 6902 6903 if (arg->ie_len) { 6904 len += sizeof(struct wmi_ie_data); 6905 len += roundup(arg->ie_len, 4); 6906 } 6907 6908 if (arg->n_channels) { 6909 len += sizeof(struct wmi_chan_list); 6910 len += sizeof(__le32) * arg->n_channels; 6911 } 6912 6913 if (arg->n_ssids) { 6914 len += sizeof(struct wmi_ssid_list); 6915 len += sizeof(struct wmi_ssid) * arg->n_ssids; 6916 } 6917 6918 if (arg->n_bssids) { 6919 len += sizeof(struct wmi_bssid_list); 6920 len += sizeof(struct wmi_mac_addr) * arg->n_bssids; 6921 } 6922 6923 return len; 6924 } 6925 6926 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, 6927 const struct wmi_start_scan_arg *arg) 6928 { 6929 u32 scan_id; 6930 u32 scan_req_id; 6931 6932 scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; 6933 scan_id |= arg->scan_id; 6934 6935 scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 6936 scan_req_id |= arg->scan_req_id; 6937 6938 cmn->scan_id = __cpu_to_le32(scan_id); 6939 cmn->scan_req_id = __cpu_to_le32(scan_req_id); 6940 cmn->vdev_id = __cpu_to_le32(arg->vdev_id); 6941 cmn->scan_priority = __cpu_to_le32(arg->scan_priority); 6942 cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); 6943 cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); 6944 cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); 6945 cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time); 6946 cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time); 6947 cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); 6948 cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); 6949 cmn->idle_time = __cpu_to_le32(arg->idle_time); 6950 cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time); 6951 cmn->probe_delay = __cpu_to_le32(arg->probe_delay); 6952 cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); 6953 } 6954 6955 static void 6956 ath10k_wmi_put_start_scan_tlvs(u8 *tlvs, 6957 const struct wmi_start_scan_arg *arg) 6958 { 6959 struct wmi_ie_data *ie; 6960 struct wmi_chan_list *channels; 6961 struct wmi_ssid_list *ssids; 6962 struct wmi_bssid_list *bssids; 6963 void *ptr = tlvs; 6964 int i; 6965 6966 if (arg->n_channels) { 6967 channels = ptr; 6968 channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); 6969 channels->num_chan = __cpu_to_le32(arg->n_channels); 6970 6971 for (i = 0; i < arg->n_channels; i++) 6972 channels->channel_list[i].freq = 6973 __cpu_to_le16(arg->channels[i]); 6974 6975 ptr += sizeof(*channels); 6976 ptr += sizeof(__le32) * arg->n_channels; 6977 } 6978 6979 if (arg->n_ssids) { 6980 ssids = ptr; 6981 ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); 6982 ssids->num_ssids = __cpu_to_le32(arg->n_ssids); 6983 6984 for (i = 0; i < arg->n_ssids; i++) { 6985 ssids->ssids[i].ssid_len = 6986 __cpu_to_le32(arg->ssids[i].len); 6987 memcpy(&ssids->ssids[i].ssid, 6988 arg->ssids[i].ssid, 6989 
arg->ssids[i].len); 6990 } 6991 6992 ptr += sizeof(*ssids); 6993 ptr += sizeof(struct wmi_ssid) * arg->n_ssids; 6994 } 6995 6996 if (arg->n_bssids) { 6997 bssids = ptr; 6998 bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); 6999 bssids->num_bssid = __cpu_to_le32(arg->n_bssids); 7000 7001 for (i = 0; i < arg->n_bssids; i++) 7002 ether_addr_copy(bssids->bssid_list[i].addr, 7003 arg->bssids[i].bssid); 7004 7005 ptr += sizeof(*bssids); 7006 ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; 7007 } 7008 7009 if (arg->ie_len) { 7010 ie = ptr; 7011 ie->tag = __cpu_to_le32(WMI_IE_TAG); 7012 ie->ie_len = __cpu_to_le32(arg->ie_len); 7013 memcpy(ie->ie_data, arg->ie, arg->ie_len); 7014 7015 ptr += sizeof(*ie); 7016 ptr += roundup(arg->ie_len, 4); 7017 } 7018 } 7019 7020 static struct sk_buff * 7021 ath10k_wmi_op_gen_start_scan(struct ath10k *ar, 7022 const struct wmi_start_scan_arg *arg) 7023 { 7024 struct wmi_start_scan_cmd *cmd; 7025 struct sk_buff *skb; 7026 size_t len; 7027 int ret; 7028 7029 ret = ath10k_wmi_start_scan_verify(arg); 7030 if (ret) 7031 return ERR_PTR(ret); 7032 7033 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg); 7034 skb = ath10k_wmi_alloc_skb(ar, len); 7035 if (!skb) 7036 return ERR_PTR(-ENOMEM); 7037 7038 cmd = (struct wmi_start_scan_cmd *)skb->data; 7039 7040 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 7041 ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); 7042 7043 cmd->burst_duration_ms = __cpu_to_le32(0); 7044 7045 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); 7046 return skb; 7047 } 7048 7049 static struct sk_buff * 7050 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar, 7051 const struct wmi_start_scan_arg *arg) 7052 { 7053 struct wmi_10x_start_scan_cmd *cmd; 7054 struct sk_buff *skb; 7055 size_t len; 7056 int ret; 7057 7058 ret = ath10k_wmi_start_scan_verify(arg); 7059 if (ret) 7060 return ERR_PTR(ret); 7061 7062 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg); 7063 skb = ath10k_wmi_alloc_skb(ar, len); 7064 if (!skb) 7065 return ERR_PTR(-ENOMEM); 7066 7067 cmd = (struct wmi_10x_start_scan_cmd *)skb->data; 7068 7069 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 7070 ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); 7071 7072 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n"); 7073 return skb; 7074 } 7075 7076 void ath10k_wmi_start_scan_init(struct ath10k *ar, 7077 struct wmi_start_scan_arg *arg) 7078 { 7079 /* setup commonly used values */ 7080 arg->scan_req_id = 1; 7081 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 7082 arg->dwell_time_active = 50; 7083 arg->dwell_time_passive = 150; 7084 arg->min_rest_time = 50; 7085 arg->max_rest_time = 500; 7086 arg->repeat_probe_time = 0; 7087 arg->probe_spacing_time = 0; 7088 arg->idle_time = 0; 7089 arg->max_scan_time = 20000; 7090 arg->probe_delay = 5; 7091 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED 7092 | WMI_SCAN_EVENT_COMPLETED 7093 | WMI_SCAN_EVENT_BSS_CHANNEL 7094 | WMI_SCAN_EVENT_FOREIGN_CHANNEL 7095 | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT 7096 | WMI_SCAN_EVENT_DEQUEUED; 7097 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; 7098 arg->n_bssids = 1; 7099 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; 7100 } 7101 7102 static struct sk_buff * 7103 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar, 7104 const struct wmi_stop_scan_arg *arg) 7105 { 7106 struct wmi_stop_scan_cmd *cmd; 7107 struct sk_buff *skb; 7108 u32 scan_id; 7109 u32 req_id; 7110 7111 if (arg->req_id > 0xFFF) 7112 return ERR_PTR(-EINVAL); 7113 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 7114 
return ERR_PTR(-EINVAL); 7115 7116 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7117 if (!skb) 7118 return ERR_PTR(-ENOMEM); 7119 7120 scan_id = arg->u.scan_id; 7121 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; 7122 7123 req_id = arg->req_id; 7124 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 7125 7126 cmd = (struct wmi_stop_scan_cmd *)skb->data; 7127 cmd->req_type = __cpu_to_le32(arg->req_type); 7128 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); 7129 cmd->scan_id = __cpu_to_le32(scan_id); 7130 cmd->scan_req_id = __cpu_to_le32(req_id); 7131 7132 ath10k_dbg(ar, ATH10K_DBG_WMI, 7133 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 7134 arg->req_id, arg->req_type, arg->u.scan_id); 7135 return skb; 7136 } 7137 7138 static struct sk_buff * 7139 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id, 7140 enum wmi_vdev_type type, 7141 enum wmi_vdev_subtype subtype, 7142 const u8 macaddr[ETH_ALEN]) 7143 { 7144 struct wmi_vdev_create_cmd *cmd; 7145 struct sk_buff *skb; 7146 7147 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7148 if (!skb) 7149 return ERR_PTR(-ENOMEM); 7150 7151 cmd = (struct wmi_vdev_create_cmd *)skb->data; 7152 cmd->vdev_id = __cpu_to_le32(vdev_id); 7153 cmd->vdev_type = __cpu_to_le32(type); 7154 cmd->vdev_subtype = __cpu_to_le32(subtype); 7155 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); 7156 7157 ath10k_dbg(ar, ATH10K_DBG_WMI, 7158 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 7159 vdev_id, type, subtype, macaddr); 7160 return skb; 7161 } 7162 7163 static struct sk_buff * 7164 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) 7165 { 7166 struct wmi_vdev_delete_cmd *cmd; 7167 struct sk_buff *skb; 7168 7169 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7170 if (!skb) 7171 return ERR_PTR(-ENOMEM); 7172 7173 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 7174 cmd->vdev_id = __cpu_to_le32(vdev_id); 7175 7176 ath10k_dbg(ar, ATH10K_DBG_WMI, 7177 "WMI vdev delete id %d\n", vdev_id); 7178 return skb; 7179 } 7180 7181 static struct sk_buff * 7182 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar, 7183 const struct wmi_vdev_start_request_arg *arg, 7184 bool restart) 7185 { 7186 struct wmi_vdev_start_request_cmd *cmd; 7187 struct sk_buff *skb; 7188 const char *cmdname; 7189 u32 flags = 0; 7190 7191 if (WARN_ON(arg->hidden_ssid && !arg->ssid)) 7192 return ERR_PTR(-EINVAL); 7193 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 7194 return ERR_PTR(-EINVAL); 7195 7196 if (restart) 7197 cmdname = "restart"; 7198 else 7199 cmdname = "start"; 7200 7201 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7202 if (!skb) 7203 return ERR_PTR(-ENOMEM); 7204 7205 if (arg->hidden_ssid) 7206 flags |= WMI_VDEV_START_HIDDEN_SSID; 7207 if (arg->pmf_enabled) 7208 flags |= WMI_VDEV_START_PMF_ENABLED; 7209 7210 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 7211 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 7212 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); 7213 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval); 7214 cmd->dtim_period = __cpu_to_le32(arg->dtim_period); 7215 cmd->flags = __cpu_to_le32(flags); 7216 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); 7217 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); 7218 7219 if (arg->ssid) { 7220 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); 7221 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 7222 } 7223 7224 ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel); 7225 7226 ath10k_dbg(ar, ATH10K_DBG_WMI, 7227 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: 
%d\n", 7228 cmdname, arg->vdev_id, 7229 flags, arg->channel.freq, arg->channel.mode, 7230 cmd->chan.flags, arg->channel.max_power); 7231 7232 return skb; 7233 } 7234 7235 static struct sk_buff * 7236 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) 7237 { 7238 struct wmi_vdev_stop_cmd *cmd; 7239 struct sk_buff *skb; 7240 7241 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7242 if (!skb) 7243 return ERR_PTR(-ENOMEM); 7244 7245 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 7246 cmd->vdev_id = __cpu_to_le32(vdev_id); 7247 7248 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 7249 return skb; 7250 } 7251 7252 static struct sk_buff * 7253 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, 7254 const u8 *bssid) 7255 { 7256 struct wmi_vdev_up_cmd *cmd; 7257 struct sk_buff *skb; 7258 7259 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7260 if (!skb) 7261 return ERR_PTR(-ENOMEM); 7262 7263 cmd = (struct wmi_vdev_up_cmd *)skb->data; 7264 cmd->vdev_id = __cpu_to_le32(vdev_id); 7265 cmd->vdev_assoc_id = __cpu_to_le32(aid); 7266 ether_addr_copy(cmd->vdev_bssid.addr, bssid); 7267 7268 ath10k_dbg(ar, ATH10K_DBG_WMI, 7269 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 7270 vdev_id, aid, bssid); 7271 return skb; 7272 } 7273 7274 static struct sk_buff * 7275 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) 7276 { 7277 struct wmi_vdev_down_cmd *cmd; 7278 struct sk_buff *skb; 7279 7280 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7281 if (!skb) 7282 return ERR_PTR(-ENOMEM); 7283 7284 cmd = (struct wmi_vdev_down_cmd *)skb->data; 7285 cmd->vdev_id = __cpu_to_le32(vdev_id); 7286 7287 ath10k_dbg(ar, ATH10K_DBG_WMI, 7288 "wmi mgmt vdev down id 0x%x\n", vdev_id); 7289 return skb; 7290 } 7291 7292 static struct sk_buff * 7293 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, 7294 u32 param_id, u32 param_value) 7295 { 7296 struct wmi_vdev_set_param_cmd *cmd; 7297 struct sk_buff *skb; 7298 7299 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { 7300 ath10k_dbg(ar, ATH10K_DBG_WMI, 7301 "vdev param %d not supported by firmware\n", 7302 param_id); 7303 return ERR_PTR(-EOPNOTSUPP); 7304 } 7305 7306 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7307 if (!skb) 7308 return ERR_PTR(-ENOMEM); 7309 7310 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 7311 cmd->vdev_id = __cpu_to_le32(vdev_id); 7312 cmd->param_id = __cpu_to_le32(param_id); 7313 cmd->param_value = __cpu_to_le32(param_value); 7314 7315 ath10k_dbg(ar, ATH10K_DBG_WMI, 7316 "wmi vdev id 0x%x set param %d value %d\n", 7317 vdev_id, param_id, param_value); 7318 return skb; 7319 } 7320 7321 static struct sk_buff * 7322 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar, 7323 const struct wmi_vdev_install_key_arg *arg) 7324 { 7325 struct wmi_vdev_install_key_cmd *cmd; 7326 struct sk_buff *skb; 7327 7328 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) 7329 return ERR_PTR(-EINVAL); 7330 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 7331 return ERR_PTR(-EINVAL); 7332 7333 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); 7334 if (!skb) 7335 return ERR_PTR(-ENOMEM); 7336 7337 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 7338 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 7339 cmd->key_idx = __cpu_to_le32(arg->key_idx); 7340 cmd->key_flags = __cpu_to_le32(arg->key_flags); 7341 cmd->key_cipher = __cpu_to_le32(arg->key_cipher); 7342 cmd->key_len = __cpu_to_le32(arg->key_len); 7343 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); 7344 
cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); 7345 7346 if (arg->macaddr) 7347 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 7348 if (arg->key_data) 7349 memcpy(cmd->key_data, arg->key_data, arg->key_len); 7350 7351 ath10k_dbg(ar, ATH10K_DBG_WMI, 7352 "wmi vdev install key idx %d cipher %d len %d\n", 7353 arg->key_idx, arg->key_cipher, arg->key_len); 7354 return skb; 7355 } 7356 7357 static struct sk_buff * 7358 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar, 7359 const struct wmi_vdev_spectral_conf_arg *arg) 7360 { 7361 struct wmi_vdev_spectral_conf_cmd *cmd; 7362 struct sk_buff *skb; 7363 7364 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7365 if (!skb) 7366 return ERR_PTR(-ENOMEM); 7367 7368 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; 7369 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 7370 cmd->scan_count = __cpu_to_le32(arg->scan_count); 7371 cmd->scan_period = __cpu_to_le32(arg->scan_period); 7372 cmd->scan_priority = __cpu_to_le32(arg->scan_priority); 7373 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); 7374 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); 7375 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); 7376 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); 7377 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); 7378 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); 7379 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); 7380 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); 7381 cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode); 7382 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr); 7383 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); 7384 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); 7385 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); 7386 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); 7387 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); 7388 7389 return skb; 7390 } 7391 7392 static struct sk_buff * 7393 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, 7394 u32 trigger, u32 enable) 7395 { 7396 struct wmi_vdev_spectral_enable_cmd *cmd; 7397 struct sk_buff *skb; 7398 7399 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7400 if (!skb) 7401 return ERR_PTR(-ENOMEM); 7402 7403 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; 7404 cmd->vdev_id = __cpu_to_le32(vdev_id); 7405 cmd->trigger_cmd = __cpu_to_le32(trigger); 7406 cmd->enable_cmd = __cpu_to_le32(enable); 7407 7408 return skb; 7409 } 7410 7411 static struct sk_buff * 7412 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, 7413 const u8 peer_addr[ETH_ALEN], 7414 enum wmi_peer_type peer_type) 7415 { 7416 struct wmi_peer_create_cmd *cmd; 7417 struct sk_buff *skb; 7418 7419 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7420 if (!skb) 7421 return ERR_PTR(-ENOMEM); 7422 7423 cmd = (struct wmi_peer_create_cmd *)skb->data; 7424 cmd->vdev_id = __cpu_to_le32(vdev_id); 7425 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 7426 cmd->peer_type = __cpu_to_le32(peer_type); 7427 7428 ath10k_dbg(ar, ATH10K_DBG_WMI, 7429 "wmi peer create vdev_id %d peer_addr %pM\n", 7430 vdev_id, peer_addr); 7431 return skb; 7432 } 7433 7434 static struct sk_buff * 7435 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, 7436 const u8 peer_addr[ETH_ALEN]) 7437 { 7438 struct wmi_peer_delete_cmd *cmd; 7439 struct sk_buff *skb; 7440 7441 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7442 if (!skb) 7443 return 
ERR_PTR(-ENOMEM); 7444 7445 cmd = (struct wmi_peer_delete_cmd *)skb->data; 7446 cmd->vdev_id = __cpu_to_le32(vdev_id); 7447 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 7448 7449 ath10k_dbg(ar, ATH10K_DBG_WMI, 7450 "wmi peer delete vdev_id %d peer_addr %pM\n", 7451 vdev_id, peer_addr); 7452 return skb; 7453 } 7454 7455 static struct sk_buff * 7456 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, 7457 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 7458 { 7459 struct wmi_peer_flush_tids_cmd *cmd; 7460 struct sk_buff *skb; 7461 7462 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7463 if (!skb) 7464 return ERR_PTR(-ENOMEM); 7465 7466 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 7467 cmd->vdev_id = __cpu_to_le32(vdev_id); 7468 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 7469 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 7470 7471 ath10k_dbg(ar, ATH10K_DBG_WMI, 7472 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 7473 vdev_id, peer_addr, tid_bitmap); 7474 return skb; 7475 } 7476 7477 static struct sk_buff * 7478 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, 7479 const u8 *peer_addr, 7480 enum wmi_peer_param param_id, 7481 u32 param_value) 7482 { 7483 struct wmi_peer_set_param_cmd *cmd; 7484 struct sk_buff *skb; 7485 7486 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7487 if (!skb) 7488 return ERR_PTR(-ENOMEM); 7489 7490 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 7491 cmd->vdev_id = __cpu_to_le32(vdev_id); 7492 cmd->param_id = __cpu_to_le32(param_id); 7493 cmd->param_value = __cpu_to_le32(param_value); 7494 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 7495 7496 ath10k_dbg(ar, ATH10K_DBG_WMI, 7497 "wmi vdev %d peer 0x%pM set param %d value %d\n", 7498 vdev_id, peer_addr, param_id, param_value); 7499 return skb; 7500 } 7501 7502 static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar, 7503 u32 gpio_num, u32 input, 7504 u32 pull_type, u32 intr_mode) 7505 { 7506 struct wmi_gpio_config_cmd *cmd; 7507 struct sk_buff *skb; 7508 7509 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7510 if (!skb) 7511 return ERR_PTR(-ENOMEM); 7512 7513 cmd = (struct wmi_gpio_config_cmd *)skb->data; 7514 cmd->pull_type = __cpu_to_le32(pull_type); 7515 cmd->gpio_num = __cpu_to_le32(gpio_num); 7516 cmd->input = __cpu_to_le32(input); 7517 cmd->intr_mode = __cpu_to_le32(intr_mode); 7518 7519 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n", 7520 gpio_num, input, pull_type, intr_mode); 7521 7522 return skb; 7523 } 7524 7525 static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar, 7526 u32 gpio_num, u32 set) 7527 { 7528 struct wmi_gpio_output_cmd *cmd; 7529 struct sk_buff *skb; 7530 7531 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7532 if (!skb) 7533 return ERR_PTR(-ENOMEM); 7534 7535 cmd = (struct wmi_gpio_output_cmd *)skb->data; 7536 cmd->gpio_num = __cpu_to_le32(gpio_num); 7537 cmd->set = __cpu_to_le32(set); 7538 7539 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n", 7540 gpio_num, set); 7541 7542 return skb; 7543 } 7544 7545 static struct sk_buff * 7546 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, 7547 enum wmi_sta_ps_mode psmode) 7548 { 7549 struct wmi_sta_powersave_mode_cmd *cmd; 7550 struct sk_buff *skb; 7551 7552 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7553 if (!skb) 7554 return ERR_PTR(-ENOMEM); 7555 7556 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; 7557 cmd->vdev_id = __cpu_to_le32(vdev_id); 
	cmd->sta_ps_mode = __cpu_to_le32(psmode);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi set powersave id 0x%x mode %d\n",
		   vdev_id, psmode);
	return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
			     enum wmi_sta_powersave_param param_id,
			     u32 value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ar, ATH10K_DBG_STA,
		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
		   vdev_id, param_id, value);
	return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			    enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	if (!mac)
		return ERR_PTR(-EINVAL);

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
		   vdev_id, param_id, value, mac);
	return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
				 const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel_arg *ch;
	struct wmi_channel *ci;
	int i;

	skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];
		ci = &cmd->chan_info[i];

		ath10k_wmi_put_wmi_channel(ar, ci, ch);
	}

	return skb;
}

static void
ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
			   const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ?
0 : 1); 7650 cmd->peer_associd = __cpu_to_le32(arg->peer_aid); 7651 cmd->peer_flags = __cpu_to_le32(arg->peer_flags); 7652 cmd->peer_caps = __cpu_to_le32(arg->peer_caps); 7653 cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval); 7654 cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps); 7655 cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); 7656 cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); 7657 cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps); 7658 cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams); 7659 cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); 7660 cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); 7661 7662 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); 7663 7664 cmd->peer_legacy_rates.num_rates = 7665 __cpu_to_le32(arg->peer_legacy_rates.num_rates); 7666 memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates, 7667 arg->peer_legacy_rates.num_rates); 7668 7669 cmd->peer_ht_rates.num_rates = 7670 __cpu_to_le32(arg->peer_ht_rates.num_rates); 7671 memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates, 7672 arg->peer_ht_rates.num_rates); 7673 7674 cmd->peer_vht_rates.rx_max_rate = 7675 __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); 7676 cmd->peer_vht_rates.rx_mcs_set = 7677 __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); 7678 cmd->peer_vht_rates.tx_max_rate = 7679 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); 7680 cmd->peer_vht_rates.tx_mcs_set = 7681 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 7682 } 7683 7684 static void 7685 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, 7686 const struct wmi_peer_assoc_complete_arg *arg) 7687 { 7688 struct wmi_main_peer_assoc_complete_cmd *cmd = buf; 7689 7690 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 7691 memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info)); 7692 } 7693 7694 static void 7695 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, 7696 const struct wmi_peer_assoc_complete_arg *arg) 7697 { 7698 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 7699 } 7700 7701 static void 7702 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, 7703 const struct wmi_peer_assoc_complete_arg *arg) 7704 { 7705 struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf; 7706 int max_mcs, max_nss; 7707 u32 info0; 7708 7709 /* TODO: Is using max values okay with firmware? 
*/ 7710 max_mcs = 0xf; 7711 max_nss = 0xf; 7712 7713 info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) | 7714 SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS); 7715 7716 ath10k_wmi_peer_assoc_fill(ar, buf, arg); 7717 cmd->info0 = __cpu_to_le32(info0); 7718 } 7719 7720 static void 7721 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf, 7722 const struct wmi_peer_assoc_complete_arg *arg) 7723 { 7724 struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf; 7725 7726 ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg); 7727 cmd->peer_bw_rxnss_override = 7728 __cpu_to_le32(arg->peer_bw_rxnss_override); 7729 } 7730 7731 static int 7732 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg) 7733 { 7734 if (arg->peer_mpdu_density > 16) 7735 return -EINVAL; 7736 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) 7737 return -EINVAL; 7738 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) 7739 return -EINVAL; 7740 7741 return 0; 7742 } 7743 7744 static struct sk_buff * 7745 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar, 7746 const struct wmi_peer_assoc_complete_arg *arg) 7747 { 7748 size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd); 7749 struct sk_buff *skb; 7750 int ret; 7751 7752 ret = ath10k_wmi_peer_assoc_check_arg(arg); 7753 if (ret) 7754 return ERR_PTR(ret); 7755 7756 skb = ath10k_wmi_alloc_skb(ar, len); 7757 if (!skb) 7758 return ERR_PTR(-ENOMEM); 7759 7760 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); 7761 7762 ath10k_dbg(ar, ATH10K_DBG_WMI, 7763 "wmi peer assoc vdev %d addr %pM (%s)\n", 7764 arg->vdev_id, arg->addr, 7765 arg->peer_reassoc ? "reassociate" : "new"); 7766 return skb; 7767 } 7768 7769 static struct sk_buff * 7770 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar, 7771 const struct wmi_peer_assoc_complete_arg *arg) 7772 { 7773 size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); 7774 struct sk_buff *skb; 7775 int ret; 7776 7777 ret = ath10k_wmi_peer_assoc_check_arg(arg); 7778 if (ret) 7779 return ERR_PTR(ret); 7780 7781 skb = ath10k_wmi_alloc_skb(ar, len); 7782 if (!skb) 7783 return ERR_PTR(-ENOMEM); 7784 7785 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); 7786 7787 ath10k_dbg(ar, ATH10K_DBG_WMI, 7788 "wmi peer assoc vdev %d addr %pM (%s)\n", 7789 arg->vdev_id, arg->addr, 7790 arg->peer_reassoc ? "reassociate" : "new"); 7791 return skb; 7792 } 7793 7794 static struct sk_buff * 7795 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar, 7796 const struct wmi_peer_assoc_complete_arg *arg) 7797 { 7798 size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); 7799 struct sk_buff *skb; 7800 int ret; 7801 7802 ret = ath10k_wmi_peer_assoc_check_arg(arg); 7803 if (ret) 7804 return ERR_PTR(ret); 7805 7806 skb = ath10k_wmi_alloc_skb(ar, len); 7807 if (!skb) 7808 return ERR_PTR(-ENOMEM); 7809 7810 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); 7811 7812 ath10k_dbg(ar, ATH10K_DBG_WMI, 7813 "wmi peer assoc vdev %d addr %pM (%s)\n", 7814 arg->vdev_id, arg->addr, 7815 arg->peer_reassoc ? 
"reassociate" : "new"); 7816 return skb; 7817 } 7818 7819 static struct sk_buff * 7820 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar, 7821 const struct wmi_peer_assoc_complete_arg *arg) 7822 { 7823 size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd); 7824 struct sk_buff *skb; 7825 int ret; 7826 7827 ret = ath10k_wmi_peer_assoc_check_arg(arg); 7828 if (ret) 7829 return ERR_PTR(ret); 7830 7831 skb = ath10k_wmi_alloc_skb(ar, len); 7832 if (!skb) 7833 return ERR_PTR(-ENOMEM); 7834 7835 ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg); 7836 7837 ath10k_dbg(ar, ATH10K_DBG_WMI, 7838 "wmi peer assoc vdev %d addr %pM (%s)\n", 7839 arg->vdev_id, arg->addr, 7840 arg->peer_reassoc ? "reassociate" : "new"); 7841 return skb; 7842 } 7843 7844 static struct sk_buff * 7845 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) 7846 { 7847 struct sk_buff *skb; 7848 7849 skb = ath10k_wmi_alloc_skb(ar, 0); 7850 if (!skb) 7851 return ERR_PTR(-ENOMEM); 7852 7853 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n"); 7854 return skb; 7855 } 7856 7857 static struct sk_buff * 7858 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar, 7859 enum wmi_bss_survey_req_type type) 7860 { 7861 struct wmi_pdev_chan_info_req_cmd *cmd; 7862 struct sk_buff *skb; 7863 7864 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7865 if (!skb) 7866 return ERR_PTR(-ENOMEM); 7867 7868 cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data; 7869 cmd->type = __cpu_to_le32(type); 7870 7871 ath10k_dbg(ar, ATH10K_DBG_WMI, 7872 "wmi pdev bss info request type %d\n", type); 7873 7874 return skb; 7875 } 7876 7877 /* This function assumes the beacon is already DMA mapped */ 7878 static struct sk_buff * 7879 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, 7880 size_t bcn_len, u32 bcn_paddr, bool dtim_zero, 7881 bool deliver_cab) 7882 { 7883 struct wmi_bcn_tx_ref_cmd *cmd; 7884 struct sk_buff *skb; 7885 struct ieee80211_hdr *hdr; 7886 u16 fc; 7887 7888 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7889 if (!skb) 7890 return ERR_PTR(-ENOMEM); 7891 7892 hdr = (struct ieee80211_hdr *)bcn; 7893 fc = le16_to_cpu(hdr->frame_control); 7894 7895 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; 7896 cmd->vdev_id = __cpu_to_le32(vdev_id); 7897 cmd->data_len = __cpu_to_le32(bcn_len); 7898 cmd->data_ptr = __cpu_to_le32(bcn_paddr); 7899 cmd->msdu_id = 0; 7900 cmd->frame_control = __cpu_to_le32(fc); 7901 cmd->flags = 0; 7902 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); 7903 7904 if (dtim_zero) 7905 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 7906 7907 if (deliver_cab) 7908 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); 7909 7910 return skb; 7911 } 7912 7913 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params, 7914 const struct wmi_wmm_params_arg *arg) 7915 { 7916 params->cwmin = __cpu_to_le32(arg->cwmin); 7917 params->cwmax = __cpu_to_le32(arg->cwmax); 7918 params->aifs = __cpu_to_le32(arg->aifs); 7919 params->txop = __cpu_to_le32(arg->txop); 7920 params->acm = __cpu_to_le32(arg->acm); 7921 params->no_ack = __cpu_to_le32(arg->no_ack); 7922 } 7923 7924 static struct sk_buff * 7925 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, 7926 const struct wmi_wmm_params_all_arg *arg) 7927 { 7928 struct wmi_pdev_set_wmm_params *cmd; 7929 struct sk_buff *skb; 7930 7931 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7932 if (!skb) 7933 return ERR_PTR(-ENOMEM); 7934 7935 cmd = (struct wmi_pdev_set_wmm_params *)skb->data; 7936 
ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be); 7937 ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); 7938 ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); 7939 ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 7940 7941 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 7942 return skb; 7943 } 7944 7945 static struct sk_buff * 7946 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) 7947 { 7948 struct wmi_request_stats_cmd *cmd; 7949 struct sk_buff *skb; 7950 7951 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7952 if (!skb) 7953 return ERR_PTR(-ENOMEM); 7954 7955 cmd = (struct wmi_request_stats_cmd *)skb->data; 7956 cmd->stats_id = __cpu_to_le32(stats_mask); 7957 7958 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n", 7959 stats_mask); 7960 return skb; 7961 } 7962 7963 static struct sk_buff * 7964 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar, 7965 enum wmi_force_fw_hang_type type, u32 delay_ms) 7966 { 7967 struct wmi_force_fw_hang_cmd *cmd; 7968 struct sk_buff *skb; 7969 7970 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7971 if (!skb) 7972 return ERR_PTR(-ENOMEM); 7973 7974 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 7975 cmd->type = __cpu_to_le32(type); 7976 cmd->delay_ms = __cpu_to_le32(delay_ms); 7977 7978 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 7979 type, delay_ms); 7980 return skb; 7981 } 7982 7983 static struct sk_buff * 7984 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, 7985 u32 log_level) 7986 { 7987 struct wmi_dbglog_cfg_cmd *cmd; 7988 struct sk_buff *skb; 7989 u32 cfg; 7990 7991 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 7992 if (!skb) 7993 return ERR_PTR(-ENOMEM); 7994 7995 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; 7996 7997 if (module_enable) { 7998 cfg = SM(log_level, 7999 ATH10K_DBGLOG_CFG_LOG_LVL); 8000 } else { 8001 /* set back defaults, all modules with WARN level */ 8002 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, 8003 ATH10K_DBGLOG_CFG_LOG_LVL); 8004 module_enable = ~0; 8005 } 8006 8007 cmd->module_enable = __cpu_to_le32(module_enable); 8008 cmd->module_valid = __cpu_to_le32(~0); 8009 cmd->config_enable = __cpu_to_le32(cfg); 8010 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); 8011 8012 ath10k_dbg(ar, ATH10K_DBG_WMI, 8013 "wmi dbglog cfg modules %08x %08x config %08x %08x\n", 8014 __le32_to_cpu(cmd->module_enable), 8015 __le32_to_cpu(cmd->module_valid), 8016 __le32_to_cpu(cmd->config_enable), 8017 __le32_to_cpu(cmd->config_valid)); 8018 return skb; 8019 } 8020 8021 static struct sk_buff * 8022 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, 8023 u32 log_level) 8024 { 8025 struct wmi_10_4_dbglog_cfg_cmd *cmd; 8026 struct sk_buff *skb; 8027 u32 cfg; 8028 8029 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8030 if (!skb) 8031 return ERR_PTR(-ENOMEM); 8032 8033 cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data; 8034 8035 if (module_enable) { 8036 cfg = SM(log_level, 8037 ATH10K_DBGLOG_CFG_LOG_LVL); 8038 } else { 8039 /* set back defaults, all modules with WARN level */ 8040 cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, 8041 ATH10K_DBGLOG_CFG_LOG_LVL); 8042 module_enable = ~0; 8043 } 8044 8045 cmd->module_enable = __cpu_to_le64(module_enable); 8046 cmd->module_valid = __cpu_to_le64(~0); 8047 cmd->config_enable = __cpu_to_le32(cfg); 8048 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); 8049 8050 ath10k_dbg(ar, ATH10K_DBG_WMI, 8051 "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n", 8052 
__le64_to_cpu(cmd->module_enable), 8053 __le64_to_cpu(cmd->module_valid), 8054 __le32_to_cpu(cmd->config_enable), 8055 __le32_to_cpu(cmd->config_valid)); 8056 return skb; 8057 } 8058 8059 static struct sk_buff * 8060 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) 8061 { 8062 struct wmi_pdev_pktlog_enable_cmd *cmd; 8063 struct sk_buff *skb; 8064 8065 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8066 if (!skb) 8067 return ERR_PTR(-ENOMEM); 8068 8069 ev_bitmap &= ATH10K_PKTLOG_ANY; 8070 8071 cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data; 8072 cmd->ev_bitmap = __cpu_to_le32(ev_bitmap); 8073 8074 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n", 8075 ev_bitmap); 8076 return skb; 8077 } 8078 8079 static struct sk_buff * 8080 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar) 8081 { 8082 struct sk_buff *skb; 8083 8084 skb = ath10k_wmi_alloc_skb(ar, 0); 8085 if (!skb) 8086 return ERR_PTR(-ENOMEM); 8087 8088 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); 8089 return skb; 8090 } 8091 8092 static struct sk_buff * 8093 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, 8094 u32 duration, u32 next_offset, 8095 u32 enabled) 8096 { 8097 struct wmi_pdev_set_quiet_cmd *cmd; 8098 struct sk_buff *skb; 8099 8100 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8101 if (!skb) 8102 return ERR_PTR(-ENOMEM); 8103 8104 cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data; 8105 cmd->period = __cpu_to_le32(period); 8106 cmd->duration = __cpu_to_le32(duration); 8107 cmd->next_start = __cpu_to_le32(next_offset); 8108 cmd->enabled = __cpu_to_le32(enabled); 8109 8110 ath10k_dbg(ar, ATH10K_DBG_WMI, 8111 "wmi quiet param: period %u duration %u enabled %d\n", 8112 period, duration, enabled); 8113 return skb; 8114 } 8115 8116 static struct sk_buff * 8117 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id, 8118 const u8 *mac) 8119 { 8120 struct wmi_addba_clear_resp_cmd *cmd; 8121 struct sk_buff *skb; 8122 8123 if (!mac) 8124 return ERR_PTR(-EINVAL); 8125 8126 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8127 if (!skb) 8128 return ERR_PTR(-ENOMEM); 8129 8130 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; 8131 cmd->vdev_id = __cpu_to_le32(vdev_id); 8132 ether_addr_copy(cmd->peer_macaddr.addr, mac); 8133 8134 ath10k_dbg(ar, ATH10K_DBG_WMI, 8135 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", 8136 vdev_id, mac); 8137 return skb; 8138 } 8139 8140 static struct sk_buff * 8141 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, 8142 u32 tid, u32 buf_size) 8143 { 8144 struct wmi_addba_send_cmd *cmd; 8145 struct sk_buff *skb; 8146 8147 if (!mac) 8148 return ERR_PTR(-EINVAL); 8149 8150 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8151 if (!skb) 8152 return ERR_PTR(-ENOMEM); 8153 8154 cmd = (struct wmi_addba_send_cmd *)skb->data; 8155 cmd->vdev_id = __cpu_to_le32(vdev_id); 8156 ether_addr_copy(cmd->peer_macaddr.addr, mac); 8157 cmd->tid = __cpu_to_le32(tid); 8158 cmd->buffersize = __cpu_to_le32(buf_size); 8159 8160 ath10k_dbg(ar, ATH10K_DBG_WMI, 8161 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", 8162 vdev_id, mac, tid, buf_size); 8163 return skb; 8164 } 8165 8166 static struct sk_buff * 8167 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, 8168 u32 tid, u32 status) 8169 { 8170 struct wmi_addba_setresponse_cmd *cmd; 8171 struct sk_buff *skb; 8172 8173 if (!mac) 8174 return ERR_PTR(-EINVAL); 8175 8176 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8177 if (!skb) 8178 return 
ERR_PTR(-ENOMEM); 8179 8180 cmd = (struct wmi_addba_setresponse_cmd *)skb->data; 8181 cmd->vdev_id = __cpu_to_le32(vdev_id); 8182 ether_addr_copy(cmd->peer_macaddr.addr, mac); 8183 cmd->tid = __cpu_to_le32(tid); 8184 cmd->statuscode = __cpu_to_le32(status); 8185 8186 ath10k_dbg(ar, ATH10K_DBG_WMI, 8187 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", 8188 vdev_id, mac, tid, status); 8189 return skb; 8190 } 8191 8192 static struct sk_buff * 8193 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, 8194 u32 tid, u32 initiator, u32 reason) 8195 { 8196 struct wmi_delba_send_cmd *cmd; 8197 struct sk_buff *skb; 8198 8199 if (!mac) 8200 return ERR_PTR(-EINVAL); 8201 8202 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8203 if (!skb) 8204 return ERR_PTR(-ENOMEM); 8205 8206 cmd = (struct wmi_delba_send_cmd *)skb->data; 8207 cmd->vdev_id = __cpu_to_le32(vdev_id); 8208 ether_addr_copy(cmd->peer_macaddr.addr, mac); 8209 cmd->tid = __cpu_to_le32(tid); 8210 cmd->initiator = __cpu_to_le32(initiator); 8211 cmd->reasoncode = __cpu_to_le32(reason); 8212 8213 ath10k_dbg(ar, ATH10K_DBG_WMI, 8214 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", 8215 vdev_id, mac, tid, initiator, reason); 8216 return skb; 8217 } 8218 8219 static struct sk_buff * 8220 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param) 8221 { 8222 struct wmi_pdev_get_tpc_config_cmd *cmd; 8223 struct sk_buff *skb; 8224 8225 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8226 if (!skb) 8227 return ERR_PTR(-ENOMEM); 8228 8229 cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data; 8230 cmd->param = __cpu_to_le32(param); 8231 8232 ath10k_dbg(ar, ATH10K_DBG_WMI, 8233 "wmi pdev get tpc config param %d\n", param); 8234 return skb; 8235 } 8236 8237 static void 8238 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev, 8239 char *buf, u32 *length) 8240 { 8241 u32 len = *length; 8242 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8243 8244 len += scnprintf(buf + len, buf_len - len, "\n"); 8245 len += scnprintf(buf + len, buf_len - len, "%30s\n", 8246 "ath10k PDEV stats"); 8247 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8248 "================="); 8249 8250 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8251 "Channel noise floor", pdev->ch_noise_floor); 8252 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8253 "Channel TX power", pdev->chan_tx_power); 8254 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8255 "TX frame count", pdev->tx_frame_count); 8256 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8257 "RX frame count", pdev->rx_frame_count); 8258 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8259 "RX clear count", pdev->rx_clear_count); 8260 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8261 "Cycle count", pdev->cycle_count); 8262 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8263 "PHY error count", pdev->phy_err_count); 8264 8265 *length = len; 8266 } 8267 8268 static void 8269 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev, 8270 char *buf, u32 *length) 8271 { 8272 u32 len = *length; 8273 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8274 8275 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8276 "RTS bad count", pdev->rts_bad); 8277 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8278 "RTS good count", pdev->rts_good); 8279 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8280 "FCS bad count", pdev->fcs_bad); 8281 
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8282 "No beacon count", pdev->no_beacons); 8283 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8284 "MIB int count", pdev->mib_int_count); 8285 8286 len += scnprintf(buf + len, buf_len - len, "\n"); 8287 *length = len; 8288 } 8289 8290 static void 8291 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, 8292 char *buf, u32 *length) 8293 { 8294 u32 len = *length; 8295 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8296 8297 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 8298 "ath10k PDEV TX stats"); 8299 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8300 "================="); 8301 8302 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8303 "HTT cookies queued", pdev->comp_queued); 8304 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8305 "HTT cookies disp.", pdev->comp_delivered); 8306 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8307 "MSDU queued", pdev->msdu_enqued); 8308 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8309 "MPDU queued", pdev->mpdu_enqued); 8310 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8311 "MSDUs dropped", pdev->wmm_drop); 8312 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8313 "Local enqued", pdev->local_enqued); 8314 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8315 "Local freed", pdev->local_freed); 8316 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8317 "HW queued", pdev->hw_queued); 8318 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8319 "PPDUs reaped", pdev->hw_reaped); 8320 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8321 "Num underruns", pdev->underrun); 8322 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8323 "PPDUs cleaned", pdev->tx_abort); 8324 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8325 "MPDUs requeued", pdev->mpdus_requeued); 8326 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8327 "Excessive retries", pdev->tx_ko); 8328 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8329 "HW rate", pdev->data_rc); 8330 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8331 "Sched self triggers", pdev->self_triggers); 8332 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8333 "Dropped due to SW retries", 8334 pdev->sw_retry_failure); 8335 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8336 "Illegal rate phy errors", 8337 pdev->illgl_rate_phy_err); 8338 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8339 "Pdev continuous xretry", pdev->pdev_cont_xretry); 8340 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8341 "TX timeout", pdev->pdev_tx_timeout); 8342 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8343 "PDEV resets", pdev->pdev_resets); 8344 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8345 "PHY underrun", pdev->phy_underrun); 8346 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8347 "MPDU is more than txop limit", pdev->txop_ovf); 8348 *length = len; 8349 } 8350 8351 static void 8352 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, 8353 char *buf, u32 *length) 8354 { 8355 u32 len = *length; 8356 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8357 8358 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 8359 "ath10k PDEV RX stats"); 8360 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8361 "================="); 8362 8363 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8364 "Mid PPDU 
route change", 8365 pdev->mid_ppdu_route_change); 8366 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8367 "Tot. number of statuses", pdev->status_rcvd); 8368 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8369 "Extra frags on rings 0", pdev->r0_frags); 8370 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8371 "Extra frags on rings 1", pdev->r1_frags); 8372 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8373 "Extra frags on rings 2", pdev->r2_frags); 8374 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8375 "Extra frags on rings 3", pdev->r3_frags); 8376 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8377 "MSDUs delivered to HTT", pdev->htt_msdus); 8378 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8379 "MPDUs delivered to HTT", pdev->htt_mpdus); 8380 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8381 "MSDUs delivered to stack", pdev->loc_msdus); 8382 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8383 "MPDUs delivered to stack", pdev->loc_mpdus); 8384 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8385 "Oversized AMSDUs", pdev->oversize_amsdu); 8386 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8387 "PHY errors", pdev->phy_errs); 8388 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8389 "PHY errors drops", pdev->phy_err_drop); 8390 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8391 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); 8392 *length = len; 8393 } 8394 8395 static void 8396 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev, 8397 char *buf, u32 *length) 8398 { 8399 u32 len = *length; 8400 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8401 int i; 8402 8403 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8404 "vdev id", vdev->vdev_id); 8405 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8406 "beacon snr", vdev->beacon_snr); 8407 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8408 "data snr", vdev->data_snr); 8409 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8410 "num rx frames", vdev->num_rx_frames); 8411 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8412 "num rts fail", vdev->num_rts_fail); 8413 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8414 "num rts success", vdev->num_rts_success); 8415 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8416 "num rx err", vdev->num_rx_err); 8417 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8418 "num rx discard", vdev->num_rx_discard); 8419 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8420 "num tx not acked", vdev->num_tx_not_acked); 8421 8422 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) 8423 len += scnprintf(buf + len, buf_len - len, 8424 "%25s [%02d] %u\n", 8425 "num tx frames", i, 8426 vdev->num_tx_frames[i]); 8427 8428 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) 8429 len += scnprintf(buf + len, buf_len - len, 8430 "%25s [%02d] %u\n", 8431 "num tx frames retries", i, 8432 vdev->num_tx_frames_retries[i]); 8433 8434 for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) 8435 len += scnprintf(buf + len, buf_len - len, 8436 "%25s [%02d] %u\n", 8437 "num tx frames failures", i, 8438 vdev->num_tx_frames_failures[i]); 8439 8440 for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) 8441 len += scnprintf(buf + len, buf_len - len, 8442 "%25s [%02d] 0x%08x\n", 8443 "tx rate history", i, 8444 vdev->tx_rate_history[i]); 8445 8446 for (i = 0 ; i < 
ARRAY_SIZE(vdev->beacon_rssi_history); i++) 8447 len += scnprintf(buf + len, buf_len - len, 8448 "%25s [%02d] %u\n", 8449 "beacon rssi history", i, 8450 vdev->beacon_rssi_history[i]); 8451 8452 len += scnprintf(buf + len, buf_len - len, "\n"); 8453 *length = len; 8454 } 8455 8456 static void 8457 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, 8458 char *buf, u32 *length, bool extended_peer) 8459 { 8460 u32 len = *length; 8461 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8462 8463 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 8464 "Peer MAC address", peer->peer_macaddr); 8465 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8466 "Peer RSSI", peer->peer_rssi); 8467 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8468 "Peer TX rate", peer->peer_tx_rate); 8469 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8470 "Peer RX rate", peer->peer_rx_rate); 8471 if (!extended_peer) 8472 len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", 8473 "Peer RX duration", peer->rx_duration); 8474 8475 len += scnprintf(buf + len, buf_len - len, "\n"); 8476 *length = len; 8477 } 8478 8479 static void 8480 ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer, 8481 char *buf, u32 *length) 8482 { 8483 u32 len = *length; 8484 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8485 8486 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 8487 "Peer MAC address", peer->peer_macaddr); 8488 len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", 8489 "Peer RX duration", peer->rx_duration); 8490 } 8491 8492 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, 8493 struct ath10k_fw_stats *fw_stats, 8494 char *buf) 8495 { 8496 u32 len = 0; 8497 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8498 const struct ath10k_fw_stats_pdev *pdev; 8499 const struct ath10k_fw_stats_vdev *vdev; 8500 const struct ath10k_fw_stats_peer *peer; 8501 size_t num_peers; 8502 size_t num_vdevs; 8503 8504 spin_lock_bh(&ar->data_lock); 8505 8506 pdev = list_first_entry_or_null(&fw_stats->pdevs, 8507 struct ath10k_fw_stats_pdev, list); 8508 if (!pdev) { 8509 ath10k_warn(ar, "failed to get pdev stats\n"); 8510 goto unlock; 8511 } 8512 8513 num_peers = list_count_nodes(&fw_stats->peers); 8514 num_vdevs = list_count_nodes(&fw_stats->vdevs); 8515 8516 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); 8517 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); 8518 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); 8519 8520 len += scnprintf(buf + len, buf_len - len, "\n"); 8521 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8522 "ath10k VDEV stats", num_vdevs); 8523 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8524 "================="); 8525 8526 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 8527 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); 8528 } 8529 8530 len += scnprintf(buf + len, buf_len - len, "\n"); 8531 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8532 "ath10k PEER stats", num_peers); 8533 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8534 "================="); 8535 8536 list_for_each_entry(peer, &fw_stats->peers, list) { 8537 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, 8538 fw_stats->extended); 8539 } 8540 8541 unlock: 8542 spin_unlock_bh(&ar->data_lock); 8543 8544 if (len >= buf_len) 8545 buf[len - 1] = 0; 8546 else 8547 buf[len] = 0; 8548 } 8549 8550 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar, 8551 struct ath10k_fw_stats *fw_stats, 8552 char *buf) 8553 { 8554 unsigned int len = 0; 8555 unsigned int 
buf_len = ATH10K_FW_STATS_BUF_SIZE; 8556 const struct ath10k_fw_stats_pdev *pdev; 8557 const struct ath10k_fw_stats_vdev *vdev; 8558 const struct ath10k_fw_stats_peer *peer; 8559 size_t num_peers; 8560 size_t num_vdevs; 8561 8562 spin_lock_bh(&ar->data_lock); 8563 8564 pdev = list_first_entry_or_null(&fw_stats->pdevs, 8565 struct ath10k_fw_stats_pdev, list); 8566 if (!pdev) { 8567 ath10k_warn(ar, "failed to get pdev stats\n"); 8568 goto unlock; 8569 } 8570 8571 num_peers = list_count_nodes(&fw_stats->peers); 8572 num_vdevs = list_count_nodes(&fw_stats->vdevs); 8573 8574 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); 8575 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len); 8576 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); 8577 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); 8578 8579 len += scnprintf(buf + len, buf_len - len, "\n"); 8580 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8581 "ath10k VDEV stats", num_vdevs); 8582 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8583 "================="); 8584 8585 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 8586 ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); 8587 } 8588 8589 len += scnprintf(buf + len, buf_len - len, "\n"); 8590 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8591 "ath10k PEER stats", num_peers); 8592 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8593 "================="); 8594 8595 list_for_each_entry(peer, &fw_stats->peers, list) { 8596 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, 8597 fw_stats->extended); 8598 } 8599 8600 unlock: 8601 spin_unlock_bh(&ar->data_lock); 8602 8603 if (len >= buf_len) 8604 buf[len - 1] = 0; 8605 else 8606 buf[len] = 0; 8607 } 8608 8609 static struct sk_buff * 8610 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, 8611 u32 detect_level, u32 detect_margin) 8612 { 8613 struct wmi_pdev_set_adaptive_cca_params *cmd; 8614 struct sk_buff *skb; 8615 8616 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8617 if (!skb) 8618 return ERR_PTR(-ENOMEM); 8619 8620 cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data; 8621 cmd->enable = __cpu_to_le32(enable); 8622 cmd->cca_detect_level = __cpu_to_le32(detect_level); 8623 cmd->cca_detect_margin = __cpu_to_le32(detect_margin); 8624 8625 ath10k_dbg(ar, ATH10K_DBG_WMI, 8626 "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n", 8627 enable, detect_level, detect_margin); 8628 return skb; 8629 } 8630 8631 static void 8632 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev, 8633 char *buf, u32 *length) 8634 { 8635 u32 len = *length; 8636 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8637 u32 val; 8638 8639 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8640 "vdev id", vdev->vdev_id); 8641 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8642 "ppdu aggr count", vdev->ppdu_aggr_cnt); 8643 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8644 "ppdu noack", vdev->ppdu_noack); 8645 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8646 "mpdu queued", vdev->mpdu_queued); 8647 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8648 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt); 8649 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8650 "mpdu sw requeued", vdev->mpdu_sw_requeued); 8651 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8652 "mpdu success retry", vdev->mpdu_suc_retry); 8653 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8654 "mpdu success multitry", 
vdev->mpdu_suc_multitry); 8655 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8656 "mpdu fail retry", vdev->mpdu_fail_retry); 8657 val = vdev->tx_ftm_suc; 8658 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8659 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8660 "tx ftm success", 8661 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8662 val = vdev->tx_ftm_suc_retry; 8663 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8664 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8665 "tx ftm success retry", 8666 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8667 val = vdev->tx_ftm_fail; 8668 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8669 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8670 "tx ftm fail", 8671 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8672 val = vdev->rx_ftmr_cnt; 8673 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8674 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8675 "rx ftm request count", 8676 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8677 val = vdev->rx_ftmr_dup_cnt; 8678 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8679 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8680 "rx ftm request dup count", 8681 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8682 val = vdev->rx_iftmr_cnt; 8683 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8684 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8685 "rx initial ftm req count", 8686 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8687 val = vdev->rx_iftmr_dup_cnt; 8688 if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) 8689 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8690 "rx initial ftm req dup cnt", 8691 MS(val, WMI_VDEV_STATS_FTM_COUNT)); 8692 len += scnprintf(buf + len, buf_len - len, "\n"); 8693 8694 *length = len; 8695 } 8696 8697 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, 8698 struct ath10k_fw_stats *fw_stats, 8699 char *buf) 8700 { 8701 u32 len = 0; 8702 u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; 8703 const struct ath10k_fw_stats_pdev *pdev; 8704 const struct ath10k_fw_stats_vdev_extd *vdev; 8705 const struct ath10k_fw_stats_peer *peer; 8706 const struct ath10k_fw_extd_stats_peer *extd_peer; 8707 size_t num_peers; 8708 size_t num_vdevs; 8709 8710 spin_lock_bh(&ar->data_lock); 8711 8712 pdev = list_first_entry_or_null(&fw_stats->pdevs, 8713 struct ath10k_fw_stats_pdev, list); 8714 if (!pdev) { 8715 ath10k_warn(ar, "failed to get pdev stats\n"); 8716 goto unlock; 8717 } 8718 8719 num_peers = list_count_nodes(&fw_stats->peers); 8720 num_vdevs = list_count_nodes(&fw_stats->vdevs); 8721 8722 ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); 8723 ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len); 8724 ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); 8725 8726 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8727 "HW paused", pdev->hw_paused); 8728 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8729 "Seqs posted", pdev->seq_posted); 8730 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8731 "Seqs failed queueing", pdev->seq_failed_queueing); 8732 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8733 "Seqs completed", pdev->seq_completed); 8734 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8735 "Seqs restarted", pdev->seq_restarted); 8736 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8737 "MU Seqs posted", pdev->mu_seq_posted); 8738 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8739 "MPDUs SW flushed", pdev->mpdus_sw_flush); 8740 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8741 "MPDUs HW filtered", pdev->mpdus_hw_filter); 8742 len += scnprintf(buf + 
len, buf_len - len, "%30s %10d\n", 8743 "MPDUs truncated", pdev->mpdus_truncated); 8744 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8745 "MPDUs receive no ACK", pdev->mpdus_ack_failed); 8746 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8747 "MPDUs expired", pdev->mpdus_expired); 8748 8749 ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); 8750 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8751 "Num Rx Overflow errors", pdev->rx_ovfl_errs); 8752 8753 len += scnprintf(buf + len, buf_len - len, "\n"); 8754 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8755 "ath10k VDEV stats", num_vdevs); 8756 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8757 "================="); 8758 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 8759 ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len); 8760 } 8761 8762 len += scnprintf(buf + len, buf_len - len, "\n"); 8763 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 8764 "ath10k PEER stats", num_peers); 8765 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8766 "================="); 8767 8768 list_for_each_entry(peer, &fw_stats->peers, list) { 8769 ath10k_wmi_fw_peer_stats_fill(peer, buf, &len, 8770 fw_stats->extended); 8771 } 8772 8773 if (fw_stats->extended) { 8774 list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) { 8775 ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf, 8776 &len); 8777 } 8778 } 8779 8780 unlock: 8781 spin_unlock_bh(&ar->data_lock); 8782 8783 if (len >= buf_len) 8784 buf[len - 1] = 0; 8785 else 8786 buf[len] = 0; 8787 } 8788 8789 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, 8790 enum wmi_vdev_subtype subtype) 8791 { 8792 switch (subtype) { 8793 case WMI_VDEV_SUBTYPE_NONE: 8794 return WMI_VDEV_SUBTYPE_LEGACY_NONE; 8795 case WMI_VDEV_SUBTYPE_P2P_DEVICE: 8796 return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV; 8797 case WMI_VDEV_SUBTYPE_P2P_CLIENT: 8798 return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI; 8799 case WMI_VDEV_SUBTYPE_P2P_GO: 8800 return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO; 8801 case WMI_VDEV_SUBTYPE_PROXY_STA: 8802 return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; 8803 case WMI_VDEV_SUBTYPE_MESH_11S: 8804 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8805 return -EOPNOTSUPP; 8806 } 8807 return -EOPNOTSUPP; 8808 } 8809 8810 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, 8811 enum wmi_vdev_subtype subtype) 8812 { 8813 switch (subtype) { 8814 case WMI_VDEV_SUBTYPE_NONE: 8815 return WMI_VDEV_SUBTYPE_10_2_4_NONE; 8816 case WMI_VDEV_SUBTYPE_P2P_DEVICE: 8817 return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV; 8818 case WMI_VDEV_SUBTYPE_P2P_CLIENT: 8819 return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI; 8820 case WMI_VDEV_SUBTYPE_P2P_GO: 8821 return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO; 8822 case WMI_VDEV_SUBTYPE_PROXY_STA: 8823 return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA; 8824 case WMI_VDEV_SUBTYPE_MESH_11S: 8825 return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; 8826 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8827 return -EOPNOTSUPP; 8828 } 8829 return -EOPNOTSUPP; 8830 } 8831 8832 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, 8833 enum wmi_vdev_subtype subtype) 8834 { 8835 switch (subtype) { 8836 case WMI_VDEV_SUBTYPE_NONE: 8837 return WMI_VDEV_SUBTYPE_10_4_NONE; 8838 case WMI_VDEV_SUBTYPE_P2P_DEVICE: 8839 return WMI_VDEV_SUBTYPE_10_4_P2P_DEV; 8840 case WMI_VDEV_SUBTYPE_P2P_CLIENT: 8841 return WMI_VDEV_SUBTYPE_10_4_P2P_CLI; 8842 case WMI_VDEV_SUBTYPE_P2P_GO: 8843 return WMI_VDEV_SUBTYPE_10_4_P2P_GO; 8844 case WMI_VDEV_SUBTYPE_PROXY_STA: 8845 return WMI_VDEV_SUBTYPE_10_4_PROXY_STA; 8846 case 
WMI_VDEV_SUBTYPE_MESH_11S: 8847 return WMI_VDEV_SUBTYPE_10_4_MESH_11S; 8848 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8849 return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; 8850 } 8851 return -EOPNOTSUPP; 8852 } 8853 8854 static struct sk_buff * 8855 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, 8856 enum wmi_host_platform_type type, 8857 u32 fw_feature_bitmap) 8858 { 8859 struct wmi_ext_resource_config_10_4_cmd *cmd; 8860 struct sk_buff *skb; 8861 u32 num_tdls_sleep_sta = 0; 8862 8863 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8864 if (!skb) 8865 return ERR_PTR(-ENOMEM); 8866 8867 if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map)) 8868 num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA; 8869 8870 cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; 8871 cmd->host_platform_config = __cpu_to_le32(type); 8872 cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); 8873 cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin); 8874 cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT); 8875 cmd->coex_gpio_pin1 = __cpu_to_le32(-1); 8876 cmd->coex_gpio_pin2 = __cpu_to_le32(-1); 8877 cmd->coex_gpio_pin3 = __cpu_to_le32(-1); 8878 cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS); 8879 cmd->num_tdls_conn_table_entries = __cpu_to_le32(20); 8880 cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta); 8881 cmd->max_tdls_concurrent_buffer_sta = 8882 __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA); 8883 8884 ath10k_dbg(ar, ATH10K_DBG_WMI, 8885 "wmi ext resource config host type %d firmware feature bitmap %08x\n", 8886 type, fw_feature_bitmap); 8887 return skb; 8888 } 8889 8890 static struct sk_buff * 8891 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, 8892 enum wmi_tdls_state state) 8893 { 8894 struct wmi_10_4_tdls_set_state_cmd *cmd; 8895 struct sk_buff *skb; 8896 u32 options = 0; 8897 8898 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8899 if (!skb) 8900 return ERR_PTR(-ENOMEM); 8901 8902 if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) && 8903 state == WMI_TDLS_ENABLE_ACTIVE) 8904 state = WMI_TDLS_ENABLE_PASSIVE; 8905 8906 if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) 8907 options |= WMI_TDLS_BUFFER_STA_EN; 8908 8909 cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data; 8910 cmd->vdev_id = __cpu_to_le32(vdev_id); 8911 cmd->state = __cpu_to_le32(state); 8912 cmd->notification_interval_ms = __cpu_to_le32(5000); 8913 cmd->tx_discovery_threshold = __cpu_to_le32(100); 8914 cmd->tx_teardown_threshold = __cpu_to_le32(5); 8915 cmd->rssi_teardown_threshold = __cpu_to_le32(-75); 8916 cmd->rssi_delta = __cpu_to_le32(-20); 8917 cmd->tdls_options = __cpu_to_le32(options); 8918 cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2); 8919 cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000); 8920 cmd->tdls_puapsd_mask = __cpu_to_le32(0xf); 8921 cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0); 8922 cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10); 8923 cmd->teardown_notification_ms = __cpu_to_le32(10); 8924 cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96); 8925 8926 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n", 8927 state, vdev_id); 8928 return skb; 8929 } 8930 8931 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp) 8932 { 8933 u32 peer_qos = 0; 8934 8935 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 8936 peer_qos |= WMI_TDLS_PEER_QOS_AC_VO; 8937 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 8938 
peer_qos |= WMI_TDLS_PEER_QOS_AC_VI; 8939 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 8940 peer_qos |= WMI_TDLS_PEER_QOS_AC_BK; 8941 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 8942 peer_qos |= WMI_TDLS_PEER_QOS_AC_BE; 8943 8944 peer_qos |= SM(sp, WMI_TDLS_PEER_SP); 8945 8946 return peer_qos; 8947 } 8948 8949 static struct sk_buff * 8950 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) 8951 { 8952 struct wmi_pdev_get_tpc_table_cmd *cmd; 8953 struct sk_buff *skb; 8954 8955 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 8956 if (!skb) 8957 return ERR_PTR(-ENOMEM); 8958 8959 cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data; 8960 cmd->param = __cpu_to_le32(param); 8961 8962 ath10k_dbg(ar, ATH10K_DBG_WMI, 8963 "wmi pdev get tpc table param:%d\n", param); 8964 return skb; 8965 } 8966 8967 static struct sk_buff * 8968 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, 8969 const struct wmi_tdls_peer_update_cmd_arg *arg, 8970 const struct wmi_tdls_peer_capab_arg *cap, 8971 const struct wmi_channel_arg *chan_arg) 8972 { 8973 struct wmi_10_4_tdls_peer_update_cmd *cmd; 8974 struct wmi_tdls_peer_capabilities *peer_cap; 8975 struct wmi_channel *chan; 8976 struct sk_buff *skb; 8977 u32 peer_qos; 8978 int len, chan_len; 8979 int i; 8980 8981 /* tdls peer update cmd has place holder for one channel*/ 8982 chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0; 8983 8984 len = sizeof(*cmd) + chan_len * sizeof(*chan); 8985 8986 skb = ath10k_wmi_alloc_skb(ar, len); 8987 if (!skb) 8988 return ERR_PTR(-ENOMEM); 8989 8990 cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data; 8991 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 8992 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); 8993 cmd->peer_state = __cpu_to_le32(arg->peer_state); 8994 8995 peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues, 8996 cap->peer_max_sp); 8997 8998 peer_cap = &cmd->peer_capab; 8999 peer_cap->peer_qos = __cpu_to_le32(peer_qos); 9000 peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); 9001 peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); 9002 peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); 9003 peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); 9004 peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); 9005 peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); 9006 9007 for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) 9008 peer_cap->peer_operclass[i] = cap->peer_operclass[i]; 9009 9010 peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); 9011 peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); 9012 peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); 9013 9014 for (i = 0; i < cap->peer_chan_len; i++) { 9015 chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i]; 9016 ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]); 9017 } 9018 9019 ath10k_dbg(ar, ATH10K_DBG_WMI, 9020 "wmi tdls peer update vdev %i state %d n_chans %u\n", 9021 arg->vdev_id, arg->peer_state, cap->peer_chan_len); 9022 return skb; 9023 } 9024 9025 static struct sk_buff * 9026 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar, 9027 const struct ath10k_radar_found_info *arg) 9028 { 9029 struct wmi_radar_found_info *cmd; 9030 struct sk_buff *skb; 9031 9032 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 9033 if (!skb) 9034 return ERR_PTR(-ENOMEM); 9035 9036 cmd = (struct wmi_radar_found_info *)skb->data; 9037 cmd->pri_min = 
__cpu_to_le32(arg->pri_min); 9038 cmd->pri_max = __cpu_to_le32(arg->pri_max); 9039 cmd->width_min = __cpu_to_le32(arg->width_min); 9040 cmd->width_max = __cpu_to_le32(arg->width_max); 9041 cmd->sidx_min = __cpu_to_le32(arg->sidx_min); 9042 cmd->sidx_max = __cpu_to_le32(arg->sidx_max); 9043 9044 ath10k_dbg(ar, ATH10K_DBG_WMI, 9045 "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", 9046 arg->pri_min, arg->pri_max, arg->width_min, 9047 arg->width_max, arg->sidx_min, arg->sidx_max); 9048 return skb; 9049 } 9050 9051 static struct sk_buff * 9052 ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar, 9053 const struct wmi_per_peer_per_tid_cfg_arg *arg) 9054 { 9055 struct wmi_peer_per_tid_cfg_cmd *cmd; 9056 struct sk_buff *skb; 9057 9058 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 9059 if (!skb) 9060 return ERR_PTR(-ENOMEM); 9061 9062 memset(skb->data, 0, sizeof(*cmd)); 9063 9064 cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data; 9065 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 9066 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr); 9067 cmd->tid = cpu_to_le32(arg->tid); 9068 cmd->ack_policy = cpu_to_le32(arg->ack_policy); 9069 cmd->aggr_control = cpu_to_le32(arg->aggr_control); 9070 cmd->rate_control = cpu_to_le32(arg->rate_ctrl); 9071 cmd->retry_count = cpu_to_le32(arg->retry_count); 9072 cmd->rcode_flags = cpu_to_le32(arg->rcode_flags); 9073 cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap); 9074 cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl); 9075 9076 ath10k_dbg(ar, ATH10K_DBG_WMI, 9077 "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n", 9078 arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control, 9079 arg->rate_ctrl, arg->rcode_flags, arg->retry_count, 9080 arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr); 9081 return skb; 9082 } 9083 9084 static struct sk_buff * 9085 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) 9086 { 9087 struct wmi_echo_cmd *cmd; 9088 struct sk_buff *skb; 9089 9090 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 9091 if (!skb) 9092 return ERR_PTR(-ENOMEM); 9093 9094 cmd = (struct wmi_echo_cmd *)skb->data; 9095 cmd->value = cpu_to_le32(value); 9096 9097 ath10k_dbg(ar, ATH10K_DBG_WMI, 9098 "wmi echo value 0x%08x\n", value); 9099 return skb; 9100 } 9101 9102 int 9103 ath10k_wmi_barrier(struct ath10k *ar) 9104 { 9105 int ret; 9106 int time_left; 9107 9108 spin_lock_bh(&ar->data_lock); 9109 reinit_completion(&ar->wmi.barrier); 9110 spin_unlock_bh(&ar->data_lock); 9111 9112 ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID); 9113 if (ret) { 9114 ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret); 9115 return ret; 9116 } 9117 9118 time_left = wait_for_completion_timeout(&ar->wmi.barrier, 9119 ATH10K_WMI_BARRIER_TIMEOUT_HZ); 9120 if (!time_left) 9121 return -ETIMEDOUT; 9122 9123 return 0; 9124 } 9125 9126 static struct sk_buff * 9127 ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar, 9128 const struct wmi_bb_timing_cfg_arg *arg) 9129 { 9130 struct wmi_pdev_bb_timing_cfg_cmd *cmd; 9131 struct sk_buff *skb; 9132 9133 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 9134 if (!skb) 9135 return ERR_PTR(-ENOMEM); 9136 9137 cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data; 9138 cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing); 9139 cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing); 9140 9141 ath10k_dbg(ar, ATH10K_DBG_WMI, 9142 "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", 
9143 arg->bb_tx_timing, arg->bb_xpa_timing); 9144 return skb; 9145 } 9146 9147 static const struct wmi_ops wmi_ops = { 9148 .rx = ath10k_wmi_op_rx, 9149 .map_svc = wmi_main_svc_map, 9150 9151 .pull_scan = ath10k_wmi_op_pull_scan_ev, 9152 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, 9153 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, 9154 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 9155 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 9156 .pull_swba = ath10k_wmi_op_pull_swba_ev, 9157 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, 9158 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 9159 .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, 9160 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 9161 .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats, 9162 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, 9163 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, 9164 9165 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, 9166 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, 9167 .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd, 9168 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, 9169 .gen_init = ath10k_wmi_op_gen_init, 9170 .gen_start_scan = ath10k_wmi_op_gen_start_scan, 9171 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, 9172 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, 9173 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, 9174 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, 9175 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, 9176 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, 9177 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, 9178 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, 9179 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, 9180 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, 9181 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, 9182 /* .gen_vdev_wmm_conf not implemented */ 9183 .gen_peer_create = ath10k_wmi_op_gen_peer_create, 9184 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, 9185 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, 9186 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, 9187 .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc, 9188 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, 9189 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, 9190 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, 9191 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, 9192 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, 9193 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, 9194 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 9195 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, 9196 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, 9197 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, 9198 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, 9199 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, 9200 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, 9201 /* .gen_pdev_get_temperature not implemented */ 9202 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, 9203 .gen_addba_send = ath10k_wmi_op_gen_addba_send, 9204 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 9205 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 9206 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, 9207 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 9208 .gen_echo = ath10k_wmi_op_gen_echo, 9209 .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, 9210 .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, 9211 9212 /* .gen_bcn_tmpl not implemented */ 9213 /* .gen_prb_tmpl not implemented */ 9214 /* .gen_p2p_go_bcn_ie not 
implemented */ 9215 /* .gen_adaptive_qcs not implemented */ 9216 /* .gen_pdev_enable_adaptive_cca not implemented */ 9217 }; 9218 9219 static const struct wmi_ops wmi_10_1_ops = { 9220 .rx = ath10k_wmi_10_1_op_rx, 9221 .map_svc = wmi_10x_svc_map, 9222 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, 9223 .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats, 9224 .gen_init = ath10k_wmi_10_1_op_gen_init, 9225 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, 9226 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, 9227 .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc, 9228 /* .gen_pdev_get_temperature not implemented */ 9229 9230 /* shared with main branch */ 9231 .pull_scan = ath10k_wmi_op_pull_scan_ev, 9232 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, 9233 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, 9234 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 9235 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 9236 .pull_swba = ath10k_wmi_op_pull_swba_ev, 9237 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, 9238 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 9239 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 9240 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, 9241 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, 9242 9243 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, 9244 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, 9245 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, 9246 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, 9247 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, 9248 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, 9249 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, 9250 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, 9251 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, 9252 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, 9253 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, 9254 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, 9255 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, 9256 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, 9257 /* .gen_vdev_wmm_conf not implemented */ 9258 .gen_peer_create = ath10k_wmi_op_gen_peer_create, 9259 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, 9260 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, 9261 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, 9262 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, 9263 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, 9264 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, 9265 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, 9266 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, 9267 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, 9268 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 9269 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, 9270 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, 9271 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, 9272 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, 9273 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, 9274 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, 9275 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, 9276 .gen_addba_send = ath10k_wmi_op_gen_addba_send, 9277 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 9278 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 9279 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 9280 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 9281 .gen_echo = ath10k_wmi_op_gen_echo, 9282 .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, 9283 .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, 9284 /* 
.gen_bcn_tmpl not implemented */ 9285 /* .gen_prb_tmpl not implemented */ 9286 /* .gen_p2p_go_bcn_ie not implemented */ 9287 /* .gen_adaptive_qcs not implemented */ 9288 /* .gen_pdev_enable_adaptive_cca not implemented */ 9289 }; 9290 9291 static const struct wmi_ops wmi_10_2_ops = { 9292 .rx = ath10k_wmi_10_2_op_rx, 9293 .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats, 9294 .gen_init = ath10k_wmi_10_2_op_gen_init, 9295 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, 9296 /* .gen_pdev_get_temperature not implemented */ 9297 9298 /* shared with 10.1 */ 9299 .map_svc = wmi_10x_svc_map, 9300 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, 9301 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, 9302 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, 9303 .gen_echo = ath10k_wmi_op_gen_echo, 9304 9305 .pull_scan = ath10k_wmi_op_pull_scan_ev, 9306 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, 9307 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, 9308 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 9309 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 9310 .pull_swba = ath10k_wmi_op_pull_swba_ev, 9311 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, 9312 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 9313 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 9314 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, 9315 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, 9316 9317 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, 9318 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, 9319 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, 9320 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, 9321 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, 9322 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, 9323 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, 9324 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, 9325 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, 9326 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, 9327 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, 9328 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, 9329 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, 9330 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, 9331 /* .gen_vdev_wmm_conf not implemented */ 9332 .gen_peer_create = ath10k_wmi_op_gen_peer_create, 9333 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, 9334 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, 9335 .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, 9336 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, 9337 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, 9338 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, 9339 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, 9340 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, 9341 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, 9342 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, 9343 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 9344 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, 9345 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, 9346 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, 9347 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, 9348 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, 9349 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, 9350 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, 9351 .gen_addba_send = ath10k_wmi_op_gen_addba_send, 9352 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 9353 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 9354 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 
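	/* 10.2 keeps the legacy vdev subtype encoding, so the generic mapping
	 * helper is reused here; the GPIO commands are likewise the handlers
	 * shared with the main firmware branch.
	 */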
9355 .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 9356 .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, 9357 .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, 9358 /* .gen_pdev_enable_adaptive_cca not implemented */ 9359 }; 9360 9361 static const struct wmi_ops wmi_10_2_4_ops = { 9362 .rx = ath10k_wmi_10_2_op_rx, 9363 .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats, 9364 .gen_init = ath10k_wmi_10_2_op_gen_init, 9365 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, 9366 .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, 9367 .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, 9368 9369 /* shared with 10.1 */ 9370 .map_svc = wmi_10x_svc_map, 9371 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, 9372 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, 9373 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, 9374 .gen_echo = ath10k_wmi_op_gen_echo, 9375 9376 .pull_scan = ath10k_wmi_op_pull_scan_ev, 9377 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev, 9378 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, 9379 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 9380 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 9381 .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev, 9382 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, 9383 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 9384 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 9385 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, 9386 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, 9387 9388 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, 9389 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, 9390 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, 9391 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, 9392 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, 9393 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, 9394 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, 9395 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, 9396 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, 9397 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, 9398 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, 9399 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, 9400 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, 9401 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, 9402 .gen_peer_create = ath10k_wmi_op_gen_peer_create, 9403 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, 9404 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, 9405 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, 9406 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, 9407 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, 9408 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, 9409 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, 9410 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, 9411 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, 9412 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 9413 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, 9414 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, 9415 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, 9416 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, 9417 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, 9418 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, 9419 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, 9420 .gen_addba_send = ath10k_wmi_op_gen_addba_send, 9421 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 9422 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 9423 .gen_pdev_get_tpc_config = 
ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, 9424 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 9425 .gen_pdev_enable_adaptive_cca = 9426 ath10k_wmi_op_gen_pdev_enable_adaptive_cca, 9427 .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, 9428 .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing, 9429 .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, 9430 .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, 9431 /* .gen_bcn_tmpl not implemented */ 9432 /* .gen_prb_tmpl not implemented */ 9433 /* .gen_p2p_go_bcn_ie not implemented */ 9434 /* .gen_adaptive_qcs not implemented */ 9435 }; 9436 9437 static const struct wmi_ops wmi_10_4_ops = { 9438 .rx = ath10k_wmi_10_4_op_rx, 9439 .map_svc = wmi_10_4_svc_map, 9440 9441 .pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats, 9442 .pull_scan = ath10k_wmi_op_pull_scan_ev, 9443 .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev, 9444 .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev, 9445 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 9446 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 9447 .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev, 9448 .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr, 9449 .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev, 9450 .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, 9451 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, 9452 .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, 9453 .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev, 9454 .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, 9455 9456 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, 9457 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, 9458 .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, 9459 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, 9460 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, 9461 .gen_init = ath10k_wmi_10_4_op_gen_init, 9462 .gen_start_scan = ath10k_wmi_op_gen_start_scan, 9463 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, 9464 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, 9465 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, 9466 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, 9467 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, 9468 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, 9469 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, 9470 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, 9471 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, 9472 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, 9473 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, 9474 .gen_peer_create = ath10k_wmi_op_gen_peer_create, 9475 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, 9476 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, 9477 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, 9478 .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc, 9479 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, 9480 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, 9481 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, 9482 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, 9483 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, 9484 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, 9485 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, 9486 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, 9487 .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg, 9488 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, 9489 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, 9490 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, 9491 .gen_addba_clear_resp = 
ath10k_wmi_op_gen_addba_clear_resp, 9492 .gen_addba_send = ath10k_wmi_op_gen_addba_send, 9493 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 9494 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 9495 .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, 9496 .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, 9497 .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state, 9498 .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, 9499 .gen_pdev_get_tpc_table_cmdid = 9500 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid, 9501 .gen_radar_found = ath10k_wmi_10_4_gen_radar_found, 9502 .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg, 9503 9504 /* shared with 10.2 */ 9505 .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, 9506 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 9507 .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, 9508 .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype, 9509 .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, 9510 .gen_echo = ath10k_wmi_op_gen_echo, 9511 .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, 9512 .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, 9513 .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, 9514 }; 9515 9516 int ath10k_wmi_attach(struct ath10k *ar) 9517 { 9518 switch (ar->running_fw->fw_file.wmi_op_version) { 9519 case ATH10K_FW_WMI_OP_VERSION_10_4: 9520 ar->wmi.ops = &wmi_10_4_ops; 9521 ar->wmi.cmd = &wmi_10_4_cmd_map; 9522 ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; 9523 ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; 9524 ar->wmi.peer_param = &wmi_peer_param_map; 9525 ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; 9526 ar->wmi_key_cipher = wmi_key_cipher_suites; 9527 break; 9528 case ATH10K_FW_WMI_OP_VERSION_10_2_4: 9529 ar->wmi.cmd = &wmi_10_2_4_cmd_map; 9530 ar->wmi.ops = &wmi_10_2_4_ops; 9531 ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; 9532 ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; 9533 ar->wmi.peer_param = &wmi_peer_param_map; 9534 ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; 9535 ar->wmi_key_cipher = wmi_key_cipher_suites; 9536 break; 9537 case ATH10K_FW_WMI_OP_VERSION_10_2: 9538 ar->wmi.cmd = &wmi_10_2_cmd_map; 9539 ar->wmi.ops = &wmi_10_2_ops; 9540 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 9541 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 9542 ar->wmi.peer_param = &wmi_peer_param_map; 9543 ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; 9544 ar->wmi_key_cipher = wmi_key_cipher_suites; 9545 break; 9546 case ATH10K_FW_WMI_OP_VERSION_10_1: 9547 ar->wmi.cmd = &wmi_10x_cmd_map; 9548 ar->wmi.ops = &wmi_10_1_ops; 9549 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 9550 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 9551 ar->wmi.peer_param = &wmi_peer_param_map; 9552 ar->wmi.peer_flags = &wmi_10x_peer_flags_map; 9553 ar->wmi_key_cipher = wmi_key_cipher_suites; 9554 break; 9555 case ATH10K_FW_WMI_OP_VERSION_MAIN: 9556 ar->wmi.cmd = &wmi_cmd_map; 9557 ar->wmi.ops = &wmi_ops; 9558 ar->wmi.vdev_param = &wmi_vdev_param_map; 9559 ar->wmi.pdev_param = &wmi_pdev_param_map; 9560 ar->wmi.peer_param = &wmi_peer_param_map; 9561 ar->wmi.peer_flags = &wmi_peer_flags_map; 9562 ar->wmi_key_cipher = wmi_key_cipher_suites; 9563 break; 9564 case ATH10K_FW_WMI_OP_VERSION_TLV: 9565 ath10k_wmi_tlv_attach(ar); 9566 ar->wmi_key_cipher = wmi_tlv_key_cipher_suites; 9567 break; 9568 case ATH10K_FW_WMI_OP_VERSION_UNSET: 9569 case ATH10K_FW_WMI_OP_VERSION_MAX: 9570 ath10k_err(ar, "unsupported WMI op version: %d\n", 9571 
ar->running_fw->fw_file.wmi_op_version); 9572 return -EINVAL; 9573 } 9574 9575 init_completion(&ar->wmi.service_ready); 9576 init_completion(&ar->wmi.unified_ready); 9577 init_completion(&ar->wmi.barrier); 9578 init_completion(&ar->wmi.radar_confirm); 9579 9580 INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); 9581 INIT_WORK(&ar->radar_confirmation_work, 9582 ath10k_radar_confirmation_work); 9583 9584 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 9585 ar->running_fw->fw_file.fw_features)) { 9586 idr_init(&ar->wmi.mgmt_pending_tx); 9587 } 9588 9589 return 0; 9590 } 9591 9592 void ath10k_wmi_free_host_mem(struct ath10k *ar) 9593 { 9594 int i; 9595 9596 /* free the host memory chunks requested by firmware */ 9597 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 9598 dma_free_coherent(ar->dev, 9599 ar->wmi.mem_chunks[i].len, 9600 ar->wmi.mem_chunks[i].vaddr, 9601 ar->wmi.mem_chunks[i].paddr); 9602 } 9603 9604 ar->wmi.num_mem_chunks = 0; 9605 } 9606 9607 static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, 9608 void *ctx) 9609 { 9610 struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr; 9611 struct ath10k *ar = ctx; 9612 struct sk_buff *msdu; 9613 9614 ath10k_dbg(ar, ATH10K_DBG_WMI, 9615 "force cleanup mgmt msdu_id %u\n", msdu_id); 9616 9617 msdu = pkt_addr->vaddr; 9618 dma_unmap_single(ar->dev, pkt_addr->paddr, 9619 msdu->len, DMA_TO_DEVICE); 9620 ieee80211_free_txskb(ar->hw, msdu); 9621 kfree(pkt_addr); 9622 9623 return 0; 9624 } 9625 9626 void ath10k_wmi_detach(struct ath10k *ar) 9627 { 9628 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 9629 ar->running_fw->fw_file.fw_features)) { 9630 spin_lock_bh(&ar->data_lock); 9631 idr_for_each(&ar->wmi.mgmt_pending_tx, 9632 ath10k_wmi_mgmt_tx_clean_up_pending, ar); 9633 idr_destroy(&ar->wmi.mgmt_pending_tx); 9634 spin_unlock_bh(&ar->data_lock); 9635 } 9636 9637 cancel_work_sync(&ar->svc_rdy_work); 9638 dev_kfree_skb(ar->svc_rdy_skb); 9639 } 9640