/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Dispatch table for a WMI (Wireless Module Interface) implementation.
 *
 * Each supported WMI ABI registers one instance of this structure on
 * ar->wmi.ops.  Any op may be left NULL when the ABI does not implement
 * it; the inline wrappers below check for that and report -EOPNOTSUPP
 * to the caller instead of dereferencing a NULL pointer.
 *
 * Naming convention visible in this table:
 *   pull_*  - decode a received event skb into an *_ev_arg structure
 *   gen_*   - build (allocate and fill) a command skb; the wrapper then
 *             sends it via ath10k_wmi_cmd_send() with the ABI-specific
 *             command id taken from ar->wmi.cmd
 */
struct wmi_ops {
	/* rx/service-map handling */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	/* event parsing */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
		struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* command buffer generation */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
						     const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
						       u32 vdev_id,
						       enum
						       wmi_peer_stats_info_request_type
						       type,
						       u8 *addr,
						       u32 reset);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* formats fw_stats into buf for debugfs consumption */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)
			(struct ath10k *ar,
			 const struct wmi_bb_timing_cfg_arg *arg);

};
/* Low-level WMI command transmit; every gen_* wrapper below funnels into it. */
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

/* rx is the one mandatory op, hence WARN (not just -EOPNOTSUPP) when absent. */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

/*
 * Event-parse wrappers: decode an event skb into the corresponding arg
 * structure via the active ABI's pull_* op, or -EOPNOTSUPP when the op
 * is not implemented.
 */
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

/* Note: operates on a raw phyerr buffer (not an skb) with left_len bytes. */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

/* Unsupported here maps to a sentinel enum value rather than an errno. */
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

/* start and restart share gen_vdev_start; the bool selects the variant. */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
struct sk_buff *skb; 778 u32 cmd_id; 779 780 if (!ar->wmi.ops->gen_vdev_spectral_conf) 781 return -EOPNOTSUPP; 782 783 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); 784 if (IS_ERR(skb)) 785 return PTR_ERR(skb); 786 787 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; 788 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 789 } 790 791 static inline int 792 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 793 u32 enable) 794 { 795 struct sk_buff *skb; 796 u32 cmd_id; 797 798 if (!ar->wmi.ops->gen_vdev_spectral_enable) 799 return -EOPNOTSUPP; 800 801 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, 802 enable); 803 if (IS_ERR(skb)) 804 return PTR_ERR(skb); 805 806 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; 807 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 808 } 809 810 static inline int 811 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, 812 const u8 peer_addr[ETH_ALEN], 813 const struct wmi_sta_uapsd_auto_trig_arg *args, 814 u32 num_ac) 815 { 816 struct sk_buff *skb; 817 u32 cmd_id; 818 819 if (!ar->wmi.ops->gen_vdev_sta_uapsd) 820 return -EOPNOTSUPP; 821 822 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, 823 num_ac); 824 if (IS_ERR(skb)) 825 return PTR_ERR(skb); 826 827 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; 828 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 829 } 830 831 static inline int 832 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, 833 const struct wmi_wmm_params_all_arg *arg) 834 { 835 struct sk_buff *skb; 836 u32 cmd_id; 837 838 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); 839 if (IS_ERR(skb)) 840 return PTR_ERR(skb); 841 842 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; 843 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 844 } 845 846 static inline int 847 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 848 const u8 peer_addr[ETH_ALEN], 849 enum wmi_peer_type peer_type) 850 { 851 struct sk_buff *skb; 852 853 if 
 (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	/* The OUI sent to firmware is the first three bytes of the MAC
	 * address packed into the low 24 bits.
	 */
	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

/* Uses the nowait send path; on failure the command skb must be freed
 * here since ownership was not taken by the transport.
 */
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
				   u32 vdev_id,
				   enum wmi_peer_stats_info_request_type type,
				   u8 *addr,
				   u32
reset) 1080 { 1081 struct sk_buff *skb; 1082 1083 if (!ar->wmi.ops->gen_request_peer_stats_info) 1084 return -EOPNOTSUPP; 1085 1086 skb = ar->wmi.ops->gen_request_peer_stats_info(ar, 1087 vdev_id, 1088 type, 1089 addr, 1090 reset); 1091 if (IS_ERR(skb)) 1092 return PTR_ERR(skb); 1093 1094 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid); 1095 } 1096 1097 static inline int 1098 ath10k_wmi_force_fw_hang(struct ath10k *ar, 1099 enum wmi_force_fw_hang_type type, u32 delay_ms) 1100 { 1101 struct sk_buff *skb; 1102 1103 if (!ar->wmi.ops->gen_force_fw_hang) 1104 return -EOPNOTSUPP; 1105 1106 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms); 1107 if (IS_ERR(skb)) 1108 return PTR_ERR(skb); 1109 1110 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); 1111 } 1112 1113 static inline int 1114 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) 1115 { 1116 struct sk_buff *skb; 1117 1118 if (!ar->wmi.ops->gen_dbglog_cfg) 1119 return -EOPNOTSUPP; 1120 1121 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level); 1122 if (IS_ERR(skb)) 1123 return PTR_ERR(skb); 1124 1125 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); 1126 } 1127 1128 static inline int 1129 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter) 1130 { 1131 struct sk_buff *skb; 1132 1133 if (!ar->wmi.ops->gen_pktlog_enable) 1134 return -EOPNOTSUPP; 1135 1136 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter); 1137 if (IS_ERR(skb)) 1138 return PTR_ERR(skb); 1139 1140 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid); 1141 } 1142 1143 static inline int 1144 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) 1145 { 1146 struct sk_buff *skb; 1147 1148 if (!ar->wmi.ops->gen_pktlog_disable) 1149 return -EOPNOTSUPP; 1150 1151 skb = ar->wmi.ops->gen_pktlog_disable(ar); 1152 if (IS_ERR(skb)) 1153 return PTR_ERR(skb); 1154 1155 return ath10k_wmi_cmd_send(ar, skb, 1156 
ar->wmi.cmd->pdev_pktlog_disable_cmdid); 1157 } 1158 1159 static inline int 1160 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration, 1161 u32 next_offset, u32 enabled) 1162 { 1163 struct sk_buff *skb; 1164 1165 if (!ar->wmi.ops->gen_pdev_set_quiet_mode) 1166 return -EOPNOTSUPP; 1167 1168 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration, 1169 next_offset, enabled); 1170 if (IS_ERR(skb)) 1171 return PTR_ERR(skb); 1172 1173 return ath10k_wmi_cmd_send(ar, skb, 1174 ar->wmi.cmd->pdev_set_quiet_mode_cmdid); 1175 } 1176 1177 static inline int 1178 ath10k_wmi_pdev_get_temperature(struct ath10k *ar) 1179 { 1180 struct sk_buff *skb; 1181 1182 if (!ar->wmi.ops->gen_pdev_get_temperature) 1183 return -EOPNOTSUPP; 1184 1185 skb = ar->wmi.ops->gen_pdev_get_temperature(ar); 1186 if (IS_ERR(skb)) 1187 return PTR_ERR(skb); 1188 1189 return ath10k_wmi_cmd_send(ar, skb, 1190 ar->wmi.cmd->pdev_get_temperature_cmdid); 1191 } 1192 1193 static inline int 1194 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac) 1195 { 1196 struct sk_buff *skb; 1197 1198 if (!ar->wmi.ops->gen_addba_clear_resp) 1199 return -EOPNOTSUPP; 1200 1201 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac); 1202 if (IS_ERR(skb)) 1203 return PTR_ERR(skb); 1204 1205 return ath10k_wmi_cmd_send(ar, skb, 1206 ar->wmi.cmd->addba_clear_resp_cmdid); 1207 } 1208 1209 static inline int 1210 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, 1211 u32 tid, u32 buf_size) 1212 { 1213 struct sk_buff *skb; 1214 1215 if (!ar->wmi.ops->gen_addba_send) 1216 return -EOPNOTSUPP; 1217 1218 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size); 1219 if (IS_ERR(skb)) 1220 return PTR_ERR(skb); 1221 1222 return ath10k_wmi_cmd_send(ar, skb, 1223 ar->wmi.cmd->addba_send_cmdid); 1224 } 1225 1226 static inline int 1227 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, 1228 u32 tid, u32 status) 1229 { 1230 struct sk_buff *skb; 
1231 1232 if (!ar->wmi.ops->gen_addba_set_resp) 1233 return -EOPNOTSUPP; 1234 1235 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status); 1236 if (IS_ERR(skb)) 1237 return PTR_ERR(skb); 1238 1239 return ath10k_wmi_cmd_send(ar, skb, 1240 ar->wmi.cmd->addba_set_resp_cmdid); 1241 } 1242 1243 static inline int 1244 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, 1245 u32 tid, u32 initiator, u32 reason) 1246 { 1247 struct sk_buff *skb; 1248 1249 if (!ar->wmi.ops->gen_delba_send) 1250 return -EOPNOTSUPP; 1251 1252 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator, 1253 reason); 1254 if (IS_ERR(skb)) 1255 return PTR_ERR(skb); 1256 1257 return ath10k_wmi_cmd_send(ar, skb, 1258 ar->wmi.cmd->delba_send_cmdid); 1259 } 1260 1261 static inline int 1262 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset, 1263 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp, 1264 void *prb_ies, size_t prb_ies_len) 1265 { 1266 struct sk_buff *skb; 1267 1268 if (!ar->wmi.ops->gen_bcn_tmpl) 1269 return -EOPNOTSUPP; 1270 1271 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn, 1272 prb_caps, prb_erp, prb_ies, 1273 prb_ies_len); 1274 if (IS_ERR(skb)) 1275 return PTR_ERR(skb); 1276 1277 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid); 1278 } 1279 1280 static inline int 1281 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb) 1282 { 1283 struct sk_buff *skb; 1284 1285 if (!ar->wmi.ops->gen_prb_tmpl) 1286 return -EOPNOTSUPP; 1287 1288 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb); 1289 if (IS_ERR(skb)) 1290 return PTR_ERR(skb); 1291 1292 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid); 1293 } 1294 1295 static inline int 1296 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie) 1297 { 1298 struct sk_buff *skb; 1299 1300 if (!ar->wmi.ops->gen_p2p_go_bcn_ie) 1301 return -EOPNOTSUPP; 1302 1303 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, 
p2p_ie); 1304 if (IS_ERR(skb)) 1305 return PTR_ERR(skb); 1306 1307 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie); 1308 } 1309 1310 static inline int 1311 ath10k_wmi_sta_keepalive(struct ath10k *ar, 1312 const struct wmi_sta_keepalive_arg *arg) 1313 { 1314 struct sk_buff *skb; 1315 u32 cmd_id; 1316 1317 if (!ar->wmi.ops->gen_sta_keepalive) 1318 return -EOPNOTSUPP; 1319 1320 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg); 1321 if (IS_ERR(skb)) 1322 return PTR_ERR(skb); 1323 1324 cmd_id = ar->wmi.cmd->sta_keepalive_cmd; 1325 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1326 } 1327 1328 static inline int 1329 ath10k_wmi_wow_enable(struct ath10k *ar) 1330 { 1331 struct sk_buff *skb; 1332 u32 cmd_id; 1333 1334 if (!ar->wmi.ops->gen_wow_enable) 1335 return -EOPNOTSUPP; 1336 1337 skb = ar->wmi.ops->gen_wow_enable(ar); 1338 if (IS_ERR(skb)) 1339 return PTR_ERR(skb); 1340 1341 cmd_id = ar->wmi.cmd->wow_enable_cmdid; 1342 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1343 } 1344 1345 static inline int 1346 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id, 1347 enum wmi_wow_wakeup_event event, 1348 u32 enable) 1349 { 1350 struct sk_buff *skb; 1351 u32 cmd_id; 1352 1353 if (!ar->wmi.ops->gen_wow_add_wakeup_event) 1354 return -EOPNOTSUPP; 1355 1356 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable); 1357 if (IS_ERR(skb)) 1358 return PTR_ERR(skb); 1359 1360 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid; 1361 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1362 } 1363 1364 static inline int 1365 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar) 1366 { 1367 struct sk_buff *skb; 1368 u32 cmd_id; 1369 1370 if (!ar->wmi.ops->gen_wow_host_wakeup_ind) 1371 return -EOPNOTSUPP; 1372 1373 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar); 1374 if (IS_ERR(skb)) 1375 return PTR_ERR(skb); 1376 1377 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid; 1378 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1379 } 1380 1381 static 
inline int 1382 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id, 1383 const u8 *pattern, const u8 *mask, 1384 int pattern_len, int pattern_offset) 1385 { 1386 struct sk_buff *skb; 1387 u32 cmd_id; 1388 1389 if (!ar->wmi.ops->gen_wow_add_pattern) 1390 return -EOPNOTSUPP; 1391 1392 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id, 1393 pattern, mask, pattern_len, 1394 pattern_offset); 1395 if (IS_ERR(skb)) 1396 return PTR_ERR(skb); 1397 1398 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid; 1399 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1400 } 1401 1402 static inline int 1403 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id) 1404 { 1405 struct sk_buff *skb; 1406 u32 cmd_id; 1407 1408 if (!ar->wmi.ops->gen_wow_del_pattern) 1409 return -EOPNOTSUPP; 1410 1411 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id); 1412 if (IS_ERR(skb)) 1413 return PTR_ERR(skb); 1414 1415 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid; 1416 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1417 } 1418 1419 static inline int 1420 ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id, 1421 struct wmi_pno_scan_req *pno_scan) 1422 { 1423 struct sk_buff *skb; 1424 u32 cmd_id; 1425 1426 if (!ar->wmi.ops->gen_wow_config_pno) 1427 return -EOPNOTSUPP; 1428 1429 skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan); 1430 if (IS_ERR(skb)) 1431 return PTR_ERR(skb); 1432 1433 cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid; 1434 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1435 } 1436 1437 static inline int 1438 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, 1439 enum wmi_tdls_state state) 1440 { 1441 struct sk_buff *skb; 1442 1443 if (!ar->wmi.ops->gen_update_fw_tdls_state) 1444 return -EOPNOTSUPP; 1445 1446 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state); 1447 if (IS_ERR(skb)) 1448 return PTR_ERR(skb); 1449 1450 return ath10k_wmi_cmd_send(ar, skb, 
ar->wmi.cmd->tdls_set_state_cmdid); 1451 } 1452 1453 static inline int 1454 ath10k_wmi_tdls_peer_update(struct ath10k *ar, 1455 const struct wmi_tdls_peer_update_cmd_arg *arg, 1456 const struct wmi_tdls_peer_capab_arg *cap, 1457 const struct wmi_channel_arg *chan) 1458 { 1459 struct sk_buff *skb; 1460 1461 if (!ar->wmi.ops->gen_tdls_peer_update) 1462 return -EOPNOTSUPP; 1463 1464 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan); 1465 if (IS_ERR(skb)) 1466 return PTR_ERR(skb); 1467 1468 return ath10k_wmi_cmd_send(ar, skb, 1469 ar->wmi.cmd->tdls_peer_update_cmdid); 1470 } 1471 1472 static inline int 1473 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable) 1474 { 1475 struct sk_buff *skb; 1476 1477 if (!ar->wmi.ops->gen_adaptive_qcs) 1478 return -EOPNOTSUPP; 1479 1480 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable); 1481 if (IS_ERR(skb)) 1482 return PTR_ERR(skb); 1483 1484 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid); 1485 } 1486 1487 static inline int 1488 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param) 1489 { 1490 struct sk_buff *skb; 1491 1492 if (!ar->wmi.ops->gen_pdev_get_tpc_config) 1493 return -EOPNOTSUPP; 1494 1495 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param); 1496 1497 if (IS_ERR(skb)) 1498 return PTR_ERR(skb); 1499 1500 return ath10k_wmi_cmd_send(ar, skb, 1501 ar->wmi.cmd->pdev_get_tpc_config_cmdid); 1502 } 1503 1504 static inline int 1505 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, 1506 char *buf) 1507 { 1508 if (!ar->wmi.ops->fw_stats_fill) 1509 return -EOPNOTSUPP; 1510 1511 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf); 1512 return 0; 1513 } 1514 1515 static inline int 1516 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, 1517 u32 detect_level, u32 detect_margin) 1518 { 1519 struct sk_buff *skb; 1520 1521 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca) 1522 return -EOPNOTSUPP; 1523 1524 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable, 
1525 detect_level, 1526 detect_margin); 1527 1528 if (IS_ERR(skb)) 1529 return PTR_ERR(skb); 1530 1531 return ath10k_wmi_cmd_send(ar, skb, 1532 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); 1533 } 1534 1535 static inline int 1536 ath10k_wmi_ext_resource_config(struct ath10k *ar, 1537 enum wmi_host_platform_type type, 1538 u32 fw_feature_bitmap) 1539 { 1540 struct sk_buff *skb; 1541 1542 if (!ar->wmi.ops->ext_resource_config) 1543 return -EOPNOTSUPP; 1544 1545 skb = ar->wmi.ops->ext_resource_config(ar, type, 1546 fw_feature_bitmap); 1547 1548 if (IS_ERR(skb)) 1549 return PTR_ERR(skb); 1550 1551 return ath10k_wmi_cmd_send(ar, skb, 1552 ar->wmi.cmd->ext_resource_cfg_cmdid); 1553 } 1554 1555 static inline int 1556 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) 1557 { 1558 if (!ar->wmi.ops->get_vdev_subtype) 1559 return -EOPNOTSUPP; 1560 1561 return ar->wmi.ops->get_vdev_subtype(ar, subtype); 1562 } 1563 1564 static inline int 1565 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar, 1566 enum wmi_bss_survey_req_type type) 1567 { 1568 struct ath10k_wmi *wmi = &ar->wmi; 1569 struct sk_buff *skb; 1570 1571 if (!wmi->ops->gen_pdev_bss_chan_info_req) 1572 return -EOPNOTSUPP; 1573 1574 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type); 1575 if (IS_ERR(skb)) 1576 return PTR_ERR(skb); 1577 1578 return ath10k_wmi_cmd_send(ar, skb, 1579 wmi->cmd->pdev_bss_chan_info_request_cmdid); 1580 } 1581 1582 static inline int 1583 ath10k_wmi_echo(struct ath10k *ar, u32 value) 1584 { 1585 struct ath10k_wmi *wmi = &ar->wmi; 1586 struct sk_buff *skb; 1587 1588 if (!wmi->ops->gen_echo) 1589 return -EOPNOTSUPP; 1590 1591 skb = wmi->ops->gen_echo(ar, value); 1592 if (IS_ERR(skb)) 1593 return PTR_ERR(skb); 1594 1595 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid); 1596 } 1597 1598 static inline int 1599 ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) 1600 { 1601 struct sk_buff *skb; 1602 1603 if 
(!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid) 1604 return -EOPNOTSUPP; 1605 1606 skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param); 1607 1608 if (IS_ERR(skb)) 1609 return PTR_ERR(skb); 1610 1611 return ath10k_wmi_cmd_send(ar, skb, 1612 ar->wmi.cmd->pdev_get_tpc_table_cmdid); 1613 } 1614 1615 static inline int 1616 ath10k_wmi_report_radar_found(struct ath10k *ar, 1617 const struct ath10k_radar_found_info *arg) 1618 { 1619 struct sk_buff *skb; 1620 1621 if (!ar->wmi.ops->gen_radar_found) 1622 return -EOPNOTSUPP; 1623 1624 skb = ar->wmi.ops->gen_radar_found(ar, arg); 1625 if (IS_ERR(skb)) 1626 return PTR_ERR(skb); 1627 1628 return ath10k_wmi_cmd_send(ar, skb, 1629 ar->wmi.cmd->radar_found_cmdid); 1630 } 1631 1632 static inline int 1633 ath10k_wmi_pdev_bb_timing(struct ath10k *ar, 1634 const struct wmi_bb_timing_cfg_arg *arg) 1635 { 1636 struct sk_buff *skb; 1637 1638 if (!ar->wmi.ops->gen_bb_timing) 1639 return -EOPNOTSUPP; 1640 1641 skb = ar->wmi.ops->gen_bb_timing(ar, arg); 1642 1643 if (IS_ERR(skb)) 1644 return PTR_ERR(skb); 1645 1646 return ath10k_wmi_cmd_send(ar, skb, 1647 ar->wmi.cmd->set_bb_timing_cmdid); 1648 } 1649 #endif 1650