/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;
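
/*
 * Per-ABI WMI call table. Each WMI flavour supported by ath10k (e.g. the
 * "main", 10.x and TLV command sets) fills in its own instance: the
 * rx/map_svc/pull_* hooks parse incoming event buffers into the common
 * *_ev_arg structures, while the gen_* hooks build command skbs in that
 * ABI's wire format. Ops that a given firmware flavour does not support
 * may be left NULL.
 */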
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
						      enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
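
/*
 * Inline wrappers around struct wmi_ops. The command wrappers below share
 * one pattern: if the active ABI does not implement the op, return
 * -EOPNOTSUPP; otherwise let the gen_* op build the command buffer (an
 * ERR_PTR() return is propagated via PTR_ERR()) and post it with
 * ath10k_wmi_cmd_send() using the ABI-specific command id from ar->wmi.cmd.
 * The beacon helper is the one exception and posts its buffer via
 * ath10k_wmi_cmd_send_nowait() instead.
 */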

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
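
/*
 * Illustrative vdev bring-up/tear-down sequence as driven by the driver
 * core (a sketch only, not verbatim driver code; locking, error handling
 * and event waits are omitted):
 *
 *	ath10k_wmi_vdev_create(ar, vdev_id, WMI_VDEV_TYPE_STA, subtype, mac);
 *	ath10k_wmi_vdev_start(ar, &arg);    (then wait for the vdev_start event)
 *	ath10k_wmi_vdev_up(ar, vdev_id, aid, bssid);
 *	...
 *	ath10k_wmi_vdev_down(ar, vdev_id);
 *	ath10k_wmi_vdev_stop(ar, vdev_id);
 *	ath10k_wmi_vdev_delete(ar, vdev_id);
 */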

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
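
/*
 * Note: unlike most wrappers in this file, the spectral scan and per-vdev
 * WMM configuration helpers below do not check for a NULL op and assume
 * the active WMI implementation always provides these callbacks.
 */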

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

#endif