/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/* Per-ABI WMI operations. Each supported WMI variant provides its own
 * implementation of this table; an op left NULL makes the corresponding
 * inline wrapper below report the call as unsupported.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsing: unpack a received WMI event into its
	 * firmware-independent argument structure.
	 */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
		int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
		struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command generation: build a WMI command buffer which the caller
	 * then hands to ath10k_wmi_cmd_send().
	 */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
		u16 rd5g, u16 ctl2g, u16 ctl5g,
		enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
		u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
		const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
		const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
		enum wmi_vdev_type type,
		enum wmi_vdev_subtype subtype,
		const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg,
		bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
		const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
		u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
		const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
		const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar,
		u32 vdev_id, u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
		const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
		const u8 *peer_addr,
		enum wmi_peer_param param_id,
		u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
		const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_powersave_param param_id,
		u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac,
		enum wmi_ap_ps_peer_param param_id,
		u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
		const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
		const void *bcn, size_t bcn_len,
		u32 bcn_paddr, bool dtim_zero,
		bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
		const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
		enum wmi_force_fw_hang_type type,
		u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
		u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
		u32 period, u32 duration,
		u32 next_offset, u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid, u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid, u32 initiator,
		u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
		u32 tim_ie_offset, struct sk_buff *bcn,
		u32 prb_caps, u32 prb_erp,
		void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
		struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
		const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		const struct wmi_sta_uapsd_auto_trig_arg *args,
		u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
		const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar,
		u32 vdev_id,
		enum wmi_wow_wakeup_event event,
		u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
		u32 pattern_id,
		const u8 *pattern,
		const u8 *mask,
		int pattern_len,
		int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
		u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
		u32 vdev_id,
		enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
		const struct wmi_tdls_peer_update_cmd_arg *arg,
		const struct wmi_tdls_peer_capab_arg *cap,
		const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
		u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
		struct ath10k_fw_stats *fw_stats,
		char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
		u8 enable,
		u32 detect_level,
		u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
		enum wmi_host_platform_type type,
		u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
		enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
		enum wmi_bss_survey_req_type type);
};
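
/*
 * Illustrative sketch only (the names below are made up, not the in-tree
 * tables): each WMI ABI implementation fills one of these tables in its
 * own .c file and attaches it to ar->wmi.ops, e.g.
 *
 *	static const struct wmi_ops my_wmi_ops = {
 *		.rx = my_wmi_op_rx,
 *		.map_svc = my_wmi_map_svc,
 *		.gen_init = my_wmi_op_gen_init,
 *	};
 *
 *	ar->wmi.ops = &my_wmi_ops;
 *
 * Any op left NULL makes the corresponding wrapper below report the
 * command or event as unsupported.
 */
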
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
		struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
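
/*
 * The command wrappers below share one pattern: build an ABI-specific
 * command buffer with the gen_* op and hand it to ath10k_wmi_cmd_send().
 * A typical call site (illustrative example only) propagates the error,
 * possibly treating -EOPNOTSUPP as "not supported by this firmware":
 *
 *	ret = ath10k_wmi_pdev_set_param(ar, param_id, param_value);
 *	if (ret && ret != -EOPNOTSUPP)
 *		ath10k_warn(ar, "failed to set pdev param: %d\n", ret);
 */
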
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
		u16 ctl2g, u16 ctl5g,
		enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
			dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		enum wmi_vdev_type type,
		enum wmi_vdev_subtype subtype,
		const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
		u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
		const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
		const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
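
/*
 * Note: spectral scan is driven as a pair of commands; the conf command
 * above programs the scan parameters and the enable command below starts
 * or stops the scan on the given vdev (see spectral.c for the actual
 * sequencing).
 */
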
static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
			enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		const struct wmi_sta_uapsd_auto_trig_arg *args,
		u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
			num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
		enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
		const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
		const void *bcn, size_t bcn_len,
		u32 bcn_paddr, bool dtim_zero,
		bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
			dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
			ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
		enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
		u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
			next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
			reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
			prb_caps, prb_erp, prb_ies,
			prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
		const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
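
/*
 * Wake-on-WLAN helpers. The assumed typical ordering (see wow.c for the
 * authoritative sequence): on suspend the driver programs wakeup events
 * and patterns per vdev and then issues the WoW enable command; on resume
 * it sends the host wakeup indication before normal operation continues.
 */
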
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
		enum wmi_wow_wakeup_event event,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
		const u8 *pattern, const u8 *mask,
		int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
			pattern, mask, pattern_len,
			pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
		enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
		const struct wmi_tdls_peer_update_cmd_arg *arg,
		const struct wmi_tdls_peer_capab_arg *cap,
		const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
		char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
		u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
			detect_level, detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
		enum wmi_host_platform_type type,
		u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
			fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
		enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

#endif