xref: /linux/drivers/net/wireless/ath/ath10k/wmi-ops.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifndef _WMI_OPS_H_
19 #define _WMI_OPS_H_
20 
21 struct ath10k;
22 struct sk_buff;
23 
/* Per-firmware-ABI WMI operation table.
 *
 * Each WMI ABI variant installs its own implementation in ar->wmi.ops.
 * The ath10k_wmi_*() static inline wrappers below dispatch through this
 * table; an op left NULL makes the corresponding wrapper return
 * -EOPNOTSUPP (ath10k_wmi_rx() additionally warns once).
 */
struct wmi_ops {
	/* Event delivery and service-bitmap translation hooks. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: decode an ABI-specific event skb into the
	 * ABI-agnostic *_ev_arg structure consumed by common code.
	 */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command generators: build an ABI-specific command skb which
	 * the wrapper then hands to ath10k_wmi_cmd_send().  They return
	 * an ERR_PTR() on failure.
	 */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};
181 
182 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
183 
184 static inline int
185 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
186 {
187 	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
188 		return -EOPNOTSUPP;
189 
190 	ar->wmi.ops->rx(ar, skb);
191 	return 0;
192 }
193 
194 static inline int
195 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
196 		   size_t len)
197 {
198 	if (!ar->wmi.ops->map_svc)
199 		return -EOPNOTSUPP;
200 
201 	ar->wmi.ops->map_svc(in, out, len);
202 	return 0;
203 }
204 
205 static inline int
206 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
207 		     struct wmi_scan_ev_arg *arg)
208 {
209 	if (!ar->wmi.ops->pull_scan)
210 		return -EOPNOTSUPP;
211 
212 	return ar->wmi.ops->pull_scan(ar, skb, arg);
213 }
214 
215 static inline int
216 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
217 			struct wmi_mgmt_rx_ev_arg *arg)
218 {
219 	if (!ar->wmi.ops->pull_mgmt_rx)
220 		return -EOPNOTSUPP;
221 
222 	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
223 }
224 
225 static inline int
226 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
227 			struct wmi_ch_info_ev_arg *arg)
228 {
229 	if (!ar->wmi.ops->pull_ch_info)
230 		return -EOPNOTSUPP;
231 
232 	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
233 }
234 
235 static inline int
236 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
237 			   struct wmi_vdev_start_ev_arg *arg)
238 {
239 	if (!ar->wmi.ops->pull_vdev_start)
240 		return -EOPNOTSUPP;
241 
242 	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
243 }
244 
245 static inline int
246 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
247 			  struct wmi_peer_kick_ev_arg *arg)
248 {
249 	if (!ar->wmi.ops->pull_peer_kick)
250 		return -EOPNOTSUPP;
251 
252 	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
253 }
254 
255 static inline int
256 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
257 		     struct wmi_swba_ev_arg *arg)
258 {
259 	if (!ar->wmi.ops->pull_swba)
260 		return -EOPNOTSUPP;
261 
262 	return ar->wmi.ops->pull_swba(ar, skb, arg);
263 }
264 
265 static inline int
266 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
267 			   struct wmi_phyerr_hdr_arg *arg)
268 {
269 	if (!ar->wmi.ops->pull_phyerr_hdr)
270 		return -EOPNOTSUPP;
271 
272 	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
273 }
274 
275 static inline int
276 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
277 		       int left_len, struct wmi_phyerr_ev_arg *arg)
278 {
279 	if (!ar->wmi.ops->pull_phyerr)
280 		return -EOPNOTSUPP;
281 
282 	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
283 }
284 
285 static inline int
286 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
287 			struct wmi_svc_rdy_ev_arg *arg)
288 {
289 	if (!ar->wmi.ops->pull_svc_rdy)
290 		return -EOPNOTSUPP;
291 
292 	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
293 }
294 
295 static inline int
296 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
297 		    struct wmi_rdy_ev_arg *arg)
298 {
299 	if (!ar->wmi.ops->pull_rdy)
300 		return -EOPNOTSUPP;
301 
302 	return ar->wmi.ops->pull_rdy(ar, skb, arg);
303 }
304 
305 static inline int
306 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
307 			 struct ath10k_fw_stats *stats)
308 {
309 	if (!ar->wmi.ops->pull_fw_stats)
310 		return -EOPNOTSUPP;
311 
312 	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
313 }
314 
315 static inline int
316 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
317 			struct wmi_roam_ev_arg *arg)
318 {
319 	if (!ar->wmi.ops->pull_roam_ev)
320 		return -EOPNOTSUPP;
321 
322 	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
323 }
324 
325 static inline int
326 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
327 			  struct wmi_wow_ev_arg *arg)
328 {
329 	if (!ar->wmi.ops->pull_wow_event)
330 		return -EOPNOTSUPP;
331 
332 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
333 }
334 
335 static inline enum wmi_txbf_conf
336 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
337 {
338 	if (!ar->wmi.ops->get_txbf_conf_scheme)
339 		return WMI_TXBF_CONF_UNSUPPORTED;
340 
341 	return ar->wmi.ops->get_txbf_conf_scheme(ar);
342 }
343 
/* Transmit a management frame over WMI.
 *
 * Wraps @msdu in an ABI-specific management-tx command skb and sends it.
 * On successful send the frame is immediately reported to mac80211 as
 * acked via ieee80211_tx_status_irqsafe() — there is no firmware ACK
 * completion event (see FIXME below), so the status is optimistic.
 *
 * Returns 0 on success, -EOPNOTSUPP if the ABI lacks gen_mgmt_tx, or a
 * negative errno from command generation/send.
 */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
369 
370 static inline int
371 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
372 			      u16 ctl2g, u16 ctl5g,
373 			      enum wmi_dfs_region dfs_reg)
374 {
375 	struct sk_buff *skb;
376 
377 	if (!ar->wmi.ops->gen_pdev_set_rd)
378 		return -EOPNOTSUPP;
379 
380 	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
381 					   dfs_reg);
382 	if (IS_ERR(skb))
383 		return PTR_ERR(skb);
384 
385 	return ath10k_wmi_cmd_send(ar, skb,
386 				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
387 }
388 
389 static inline int
390 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
391 {
392 	struct sk_buff *skb;
393 
394 	if (!ar->wmi.ops->gen_pdev_suspend)
395 		return -EOPNOTSUPP;
396 
397 	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
398 	if (IS_ERR(skb))
399 		return PTR_ERR(skb);
400 
401 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
402 }
403 
404 static inline int
405 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
406 {
407 	struct sk_buff *skb;
408 
409 	if (!ar->wmi.ops->gen_pdev_resume)
410 		return -EOPNOTSUPP;
411 
412 	skb = ar->wmi.ops->gen_pdev_resume(ar);
413 	if (IS_ERR(skb))
414 		return PTR_ERR(skb);
415 
416 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
417 }
418 
419 static inline int
420 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
421 {
422 	struct sk_buff *skb;
423 
424 	if (!ar->wmi.ops->gen_pdev_set_param)
425 		return -EOPNOTSUPP;
426 
427 	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
428 	if (IS_ERR(skb))
429 		return PTR_ERR(skb);
430 
431 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
432 }
433 
434 static inline int
435 ath10k_wmi_cmd_init(struct ath10k *ar)
436 {
437 	struct sk_buff *skb;
438 
439 	if (!ar->wmi.ops->gen_init)
440 		return -EOPNOTSUPP;
441 
442 	skb = ar->wmi.ops->gen_init(ar);
443 	if (IS_ERR(skb))
444 		return PTR_ERR(skb);
445 
446 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
447 }
448 
449 static inline int
450 ath10k_wmi_start_scan(struct ath10k *ar,
451 		      const struct wmi_start_scan_arg *arg)
452 {
453 	struct sk_buff *skb;
454 
455 	if (!ar->wmi.ops->gen_start_scan)
456 		return -EOPNOTSUPP;
457 
458 	skb = ar->wmi.ops->gen_start_scan(ar, arg);
459 	if (IS_ERR(skb))
460 		return PTR_ERR(skb);
461 
462 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
463 }
464 
465 static inline int
466 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
467 {
468 	struct sk_buff *skb;
469 
470 	if (!ar->wmi.ops->gen_stop_scan)
471 		return -EOPNOTSUPP;
472 
473 	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
474 	if (IS_ERR(skb))
475 		return PTR_ERR(skb);
476 
477 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
478 }
479 
480 static inline int
481 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
482 		       enum wmi_vdev_type type,
483 		       enum wmi_vdev_subtype subtype,
484 		       const u8 macaddr[ETH_ALEN])
485 {
486 	struct sk_buff *skb;
487 
488 	if (!ar->wmi.ops->gen_vdev_create)
489 		return -EOPNOTSUPP;
490 
491 	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
492 	if (IS_ERR(skb))
493 		return PTR_ERR(skb);
494 
495 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
496 }
497 
498 static inline int
499 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
500 {
501 	struct sk_buff *skb;
502 
503 	if (!ar->wmi.ops->gen_vdev_delete)
504 		return -EOPNOTSUPP;
505 
506 	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
507 	if (IS_ERR(skb))
508 		return PTR_ERR(skb);
509 
510 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
511 }
512 
513 static inline int
514 ath10k_wmi_vdev_start(struct ath10k *ar,
515 		      const struct wmi_vdev_start_request_arg *arg)
516 {
517 	struct sk_buff *skb;
518 
519 	if (!ar->wmi.ops->gen_vdev_start)
520 		return -EOPNOTSUPP;
521 
522 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
523 	if (IS_ERR(skb))
524 		return PTR_ERR(skb);
525 
526 	return ath10k_wmi_cmd_send(ar, skb,
527 				   ar->wmi.cmd->vdev_start_request_cmdid);
528 }
529 
530 static inline int
531 ath10k_wmi_vdev_restart(struct ath10k *ar,
532 			const struct wmi_vdev_start_request_arg *arg)
533 {
534 	struct sk_buff *skb;
535 
536 	if (!ar->wmi.ops->gen_vdev_start)
537 		return -EOPNOTSUPP;
538 
539 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
540 	if (IS_ERR(skb))
541 		return PTR_ERR(skb);
542 
543 	return ath10k_wmi_cmd_send(ar, skb,
544 				   ar->wmi.cmd->vdev_restart_request_cmdid);
545 }
546 
547 static inline int
548 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
549 {
550 	struct sk_buff *skb;
551 
552 	if (!ar->wmi.ops->gen_vdev_stop)
553 		return -EOPNOTSUPP;
554 
555 	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
556 	if (IS_ERR(skb))
557 		return PTR_ERR(skb);
558 
559 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
560 }
561 
562 static inline int
563 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
564 {
565 	struct sk_buff *skb;
566 
567 	if (!ar->wmi.ops->gen_vdev_up)
568 		return -EOPNOTSUPP;
569 
570 	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
571 	if (IS_ERR(skb))
572 		return PTR_ERR(skb);
573 
574 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
575 }
576 
577 static inline int
578 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
579 {
580 	struct sk_buff *skb;
581 
582 	if (!ar->wmi.ops->gen_vdev_down)
583 		return -EOPNOTSUPP;
584 
585 	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
586 	if (IS_ERR(skb))
587 		return PTR_ERR(skb);
588 
589 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
590 }
591 
592 static inline int
593 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
594 			  u32 param_value)
595 {
596 	struct sk_buff *skb;
597 
598 	if (!ar->wmi.ops->gen_vdev_set_param)
599 		return -EOPNOTSUPP;
600 
601 	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
602 					      param_value);
603 	if (IS_ERR(skb))
604 		return PTR_ERR(skb);
605 
606 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
607 }
608 
609 static inline int
610 ath10k_wmi_vdev_install_key(struct ath10k *ar,
611 			    const struct wmi_vdev_install_key_arg *arg)
612 {
613 	struct sk_buff *skb;
614 
615 	if (!ar->wmi.ops->gen_vdev_install_key)
616 		return -EOPNOTSUPP;
617 
618 	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
619 	if (IS_ERR(skb))
620 		return PTR_ERR(skb);
621 
622 	return ath10k_wmi_cmd_send(ar, skb,
623 				   ar->wmi.cmd->vdev_install_key_cmdid);
624 }
625 
626 static inline int
627 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
628 			      const struct wmi_vdev_spectral_conf_arg *arg)
629 {
630 	struct sk_buff *skb;
631 	u32 cmd_id;
632 
633 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
634 	if (IS_ERR(skb))
635 		return PTR_ERR(skb);
636 
637 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
638 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
639 }
640 
641 static inline int
642 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
643 				u32 enable)
644 {
645 	struct sk_buff *skb;
646 	u32 cmd_id;
647 
648 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
649 						    enable);
650 	if (IS_ERR(skb))
651 		return PTR_ERR(skb);
652 
653 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
654 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
655 }
656 
657 static inline int
658 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
659 			  const u8 peer_addr[ETH_ALEN],
660 			  const struct wmi_sta_uapsd_auto_trig_arg *args,
661 			  u32 num_ac)
662 {
663 	struct sk_buff *skb;
664 	u32 cmd_id;
665 
666 	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
667 		return -EOPNOTSUPP;
668 
669 	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
670 					      num_ac);
671 	if (IS_ERR(skb))
672 		return PTR_ERR(skb);
673 
674 	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
675 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
676 }
677 
678 static inline int
679 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
680 			 const struct wmi_wmm_params_all_arg *arg)
681 {
682 	struct sk_buff *skb;
683 	u32 cmd_id;
684 
685 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
686 	if (IS_ERR(skb))
687 		return PTR_ERR(skb);
688 
689 	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
690 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
691 }
692 
693 static inline int
694 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
695 		       const u8 peer_addr[ETH_ALEN],
696 		       enum wmi_peer_type peer_type)
697 {
698 	struct sk_buff *skb;
699 
700 	if (!ar->wmi.ops->gen_peer_create)
701 		return -EOPNOTSUPP;
702 
703 	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
704 	if (IS_ERR(skb))
705 		return PTR_ERR(skb);
706 
707 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
708 }
709 
710 static inline int
711 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
712 		       const u8 peer_addr[ETH_ALEN])
713 {
714 	struct sk_buff *skb;
715 
716 	if (!ar->wmi.ops->gen_peer_delete)
717 		return -EOPNOTSUPP;
718 
719 	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
720 	if (IS_ERR(skb))
721 		return PTR_ERR(skb);
722 
723 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
724 }
725 
726 static inline int
727 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
728 		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
729 {
730 	struct sk_buff *skb;
731 
732 	if (!ar->wmi.ops->gen_peer_flush)
733 		return -EOPNOTSUPP;
734 
735 	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
736 	if (IS_ERR(skb))
737 		return PTR_ERR(skb);
738 
739 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
740 }
741 
742 static inline int
743 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
744 			  enum wmi_peer_param param_id, u32 param_value)
745 {
746 	struct sk_buff *skb;
747 
748 	if (!ar->wmi.ops->gen_peer_set_param)
749 		return -EOPNOTSUPP;
750 
751 	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
752 					      param_value);
753 	if (IS_ERR(skb))
754 		return PTR_ERR(skb);
755 
756 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
757 }
758 
759 static inline int
760 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
761 		      enum wmi_sta_ps_mode psmode)
762 {
763 	struct sk_buff *skb;
764 
765 	if (!ar->wmi.ops->gen_set_psmode)
766 		return -EOPNOTSUPP;
767 
768 	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
769 	if (IS_ERR(skb))
770 		return PTR_ERR(skb);
771 
772 	return ath10k_wmi_cmd_send(ar, skb,
773 				   ar->wmi.cmd->sta_powersave_mode_cmdid);
774 }
775 
776 static inline int
777 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
778 			    enum wmi_sta_powersave_param param_id, u32 value)
779 {
780 	struct sk_buff *skb;
781 
782 	if (!ar->wmi.ops->gen_set_sta_ps)
783 		return -EOPNOTSUPP;
784 
785 	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
786 	if (IS_ERR(skb))
787 		return PTR_ERR(skb);
788 
789 	return ath10k_wmi_cmd_send(ar, skb,
790 				   ar->wmi.cmd->sta_powersave_param_cmdid);
791 }
792 
793 static inline int
794 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
795 			   enum wmi_ap_ps_peer_param param_id, u32 value)
796 {
797 	struct sk_buff *skb;
798 
799 	if (!ar->wmi.ops->gen_set_ap_ps)
800 		return -EOPNOTSUPP;
801 
802 	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
803 	if (IS_ERR(skb))
804 		return PTR_ERR(skb);
805 
806 	return ath10k_wmi_cmd_send(ar, skb,
807 				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
808 }
809 
810 static inline int
811 ath10k_wmi_scan_chan_list(struct ath10k *ar,
812 			  const struct wmi_scan_chan_list_arg *arg)
813 {
814 	struct sk_buff *skb;
815 
816 	if (!ar->wmi.ops->gen_scan_chan_list)
817 		return -EOPNOTSUPP;
818 
819 	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
820 	if (IS_ERR(skb))
821 		return PTR_ERR(skb);
822 
823 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
824 }
825 
826 static inline int
827 ath10k_wmi_peer_assoc(struct ath10k *ar,
828 		      const struct wmi_peer_assoc_complete_arg *arg)
829 {
830 	struct sk_buff *skb;
831 
832 	if (!ar->wmi.ops->gen_peer_assoc)
833 		return -EOPNOTSUPP;
834 
835 	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
836 	if (IS_ERR(skb))
837 		return PTR_ERR(skb);
838 
839 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
840 }
841 
842 static inline int
843 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
844 				  const void *bcn, size_t bcn_len,
845 				  u32 bcn_paddr, bool dtim_zero,
846 				  bool deliver_cab)
847 {
848 	struct sk_buff *skb;
849 	int ret;
850 
851 	if (!ar->wmi.ops->gen_beacon_dma)
852 		return -EOPNOTSUPP;
853 
854 	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
855 					  dtim_zero, deliver_cab);
856 	if (IS_ERR(skb))
857 		return PTR_ERR(skb);
858 
859 	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
860 					 ar->wmi.cmd->pdev_send_bcn_cmdid);
861 	if (ret) {
862 		dev_kfree_skb(skb);
863 		return ret;
864 	}
865 
866 	return 0;
867 }
868 
869 static inline int
870 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
871 			       const struct wmi_wmm_params_all_arg *arg)
872 {
873 	struct sk_buff *skb;
874 
875 	if (!ar->wmi.ops->gen_pdev_set_wmm)
876 		return -EOPNOTSUPP;
877 
878 	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
879 	if (IS_ERR(skb))
880 		return PTR_ERR(skb);
881 
882 	return ath10k_wmi_cmd_send(ar, skb,
883 				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
884 }
885 
886 static inline int
887 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
888 {
889 	struct sk_buff *skb;
890 
891 	if (!ar->wmi.ops->gen_request_stats)
892 		return -EOPNOTSUPP;
893 
894 	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
895 	if (IS_ERR(skb))
896 		return PTR_ERR(skb);
897 
898 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
899 }
900 
901 static inline int
902 ath10k_wmi_force_fw_hang(struct ath10k *ar,
903 			 enum wmi_force_fw_hang_type type, u32 delay_ms)
904 {
905 	struct sk_buff *skb;
906 
907 	if (!ar->wmi.ops->gen_force_fw_hang)
908 		return -EOPNOTSUPP;
909 
910 	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
911 	if (IS_ERR(skb))
912 		return PTR_ERR(skb);
913 
914 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
915 }
916 
917 static inline int
918 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
919 {
920 	struct sk_buff *skb;
921 
922 	if (!ar->wmi.ops->gen_dbglog_cfg)
923 		return -EOPNOTSUPP;
924 
925 	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
926 	if (IS_ERR(skb))
927 		return PTR_ERR(skb);
928 
929 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
930 }
931 
932 static inline int
933 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
934 {
935 	struct sk_buff *skb;
936 
937 	if (!ar->wmi.ops->gen_pktlog_enable)
938 		return -EOPNOTSUPP;
939 
940 	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
941 	if (IS_ERR(skb))
942 		return PTR_ERR(skb);
943 
944 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
945 }
946 
947 static inline int
948 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
949 {
950 	struct sk_buff *skb;
951 
952 	if (!ar->wmi.ops->gen_pktlog_disable)
953 		return -EOPNOTSUPP;
954 
955 	skb = ar->wmi.ops->gen_pktlog_disable(ar);
956 	if (IS_ERR(skb))
957 		return PTR_ERR(skb);
958 
959 	return ath10k_wmi_cmd_send(ar, skb,
960 				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
961 }
962 
963 static inline int
964 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
965 			       u32 next_offset, u32 enabled)
966 {
967 	struct sk_buff *skb;
968 
969 	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
970 		return -EOPNOTSUPP;
971 
972 	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
973 						   next_offset, enabled);
974 	if (IS_ERR(skb))
975 		return PTR_ERR(skb);
976 
977 	return ath10k_wmi_cmd_send(ar, skb,
978 				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
979 }
980 
981 static inline int
982 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
983 {
984 	struct sk_buff *skb;
985 
986 	if (!ar->wmi.ops->gen_pdev_get_temperature)
987 		return -EOPNOTSUPP;
988 
989 	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
990 	if (IS_ERR(skb))
991 		return PTR_ERR(skb);
992 
993 	return ath10k_wmi_cmd_send(ar, skb,
994 				   ar->wmi.cmd->pdev_get_temperature_cmdid);
995 }
996 
997 static inline int
998 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
999 {
1000 	struct sk_buff *skb;
1001 
1002 	if (!ar->wmi.ops->gen_addba_clear_resp)
1003 		return -EOPNOTSUPP;
1004 
1005 	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1006 	if (IS_ERR(skb))
1007 		return PTR_ERR(skb);
1008 
1009 	return ath10k_wmi_cmd_send(ar, skb,
1010 				   ar->wmi.cmd->addba_clear_resp_cmdid);
1011 }
1012 
1013 static inline int
1014 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1015 		      u32 tid, u32 buf_size)
1016 {
1017 	struct sk_buff *skb;
1018 
1019 	if (!ar->wmi.ops->gen_addba_send)
1020 		return -EOPNOTSUPP;
1021 
1022 	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1023 	if (IS_ERR(skb))
1024 		return PTR_ERR(skb);
1025 
1026 	return ath10k_wmi_cmd_send(ar, skb,
1027 				   ar->wmi.cmd->addba_send_cmdid);
1028 }
1029 
1030 static inline int
1031 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1032 			  u32 tid, u32 status)
1033 {
1034 	struct sk_buff *skb;
1035 
1036 	if (!ar->wmi.ops->gen_addba_set_resp)
1037 		return -EOPNOTSUPP;
1038 
1039 	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1040 	if (IS_ERR(skb))
1041 		return PTR_ERR(skb);
1042 
1043 	return ath10k_wmi_cmd_send(ar, skb,
1044 				   ar->wmi.cmd->addba_set_resp_cmdid);
1045 }
1046 
1047 static inline int
1048 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1049 		      u32 tid, u32 initiator, u32 reason)
1050 {
1051 	struct sk_buff *skb;
1052 
1053 	if (!ar->wmi.ops->gen_delba_send)
1054 		return -EOPNOTSUPP;
1055 
1056 	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1057 					  reason);
1058 	if (IS_ERR(skb))
1059 		return PTR_ERR(skb);
1060 
1061 	return ath10k_wmi_cmd_send(ar, skb,
1062 				   ar->wmi.cmd->delba_send_cmdid);
1063 }
1064 
1065 static inline int
1066 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1067 		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1068 		    void *prb_ies, size_t prb_ies_len)
1069 {
1070 	struct sk_buff *skb;
1071 
1072 	if (!ar->wmi.ops->gen_bcn_tmpl)
1073 		return -EOPNOTSUPP;
1074 
1075 	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1076 					prb_caps, prb_erp, prb_ies,
1077 					prb_ies_len);
1078 	if (IS_ERR(skb))
1079 		return PTR_ERR(skb);
1080 
1081 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1082 }
1083 
1084 static inline int
1085 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1086 {
1087 	struct sk_buff *skb;
1088 
1089 	if (!ar->wmi.ops->gen_prb_tmpl)
1090 		return -EOPNOTSUPP;
1091 
1092 	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1093 	if (IS_ERR(skb))
1094 		return PTR_ERR(skb);
1095 
1096 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1097 }
1098 
1099 static inline int
1100 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1101 {
1102 	struct sk_buff *skb;
1103 
1104 	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1105 		return -EOPNOTSUPP;
1106 
1107 	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1108 	if (IS_ERR(skb))
1109 		return PTR_ERR(skb);
1110 
1111 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1112 }
1113 
1114 static inline int
1115 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1116 			 const struct wmi_sta_keepalive_arg *arg)
1117 {
1118 	struct sk_buff *skb;
1119 	u32 cmd_id;
1120 
1121 	if (!ar->wmi.ops->gen_sta_keepalive)
1122 		return -EOPNOTSUPP;
1123 
1124 	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1125 	if (IS_ERR(skb))
1126 		return PTR_ERR(skb);
1127 
1128 	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1129 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1130 }
1131 
1132 static inline int
1133 ath10k_wmi_wow_enable(struct ath10k *ar)
1134 {
1135 	struct sk_buff *skb;
1136 	u32 cmd_id;
1137 
1138 	if (!ar->wmi.ops->gen_wow_enable)
1139 		return -EOPNOTSUPP;
1140 
1141 	skb = ar->wmi.ops->gen_wow_enable(ar);
1142 	if (IS_ERR(skb))
1143 		return PTR_ERR(skb);
1144 
1145 	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1146 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1147 }
1148 
1149 static inline int
1150 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1151 				enum wmi_wow_wakeup_event event,
1152 				u32 enable)
1153 {
1154 	struct sk_buff *skb;
1155 	u32 cmd_id;
1156 
1157 	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1158 		return -EOPNOTSUPP;
1159 
1160 	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1161 	if (IS_ERR(skb))
1162 		return PTR_ERR(skb);
1163 
1164 	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1165 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1166 }
1167 
1168 static inline int
1169 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1170 {
1171 	struct sk_buff *skb;
1172 	u32 cmd_id;
1173 
1174 	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1175 		return -EOPNOTSUPP;
1176 
1177 	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1178 	if (IS_ERR(skb))
1179 		return PTR_ERR(skb);
1180 
1181 	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1182 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1183 }
1184 
1185 static inline int
1186 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1187 			   const u8 *pattern, const u8 *mask,
1188 			   int pattern_len, int pattern_offset)
1189 {
1190 	struct sk_buff *skb;
1191 	u32 cmd_id;
1192 
1193 	if (!ar->wmi.ops->gen_wow_add_pattern)
1194 		return -EOPNOTSUPP;
1195 
1196 	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1197 					       pattern, mask, pattern_len,
1198 					       pattern_offset);
1199 	if (IS_ERR(skb))
1200 		return PTR_ERR(skb);
1201 
1202 	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1203 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1204 }
1205 
1206 static inline int
1207 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1208 {
1209 	struct sk_buff *skb;
1210 	u32 cmd_id;
1211 
1212 	if (!ar->wmi.ops->gen_wow_del_pattern)
1213 		return -EOPNOTSUPP;
1214 
1215 	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1216 	if (IS_ERR(skb))
1217 		return PTR_ERR(skb);
1218 
1219 	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1220 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1221 }
1222 
1223 static inline int
1224 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1225 				enum wmi_tdls_state state)
1226 {
1227 	struct sk_buff *skb;
1228 
1229 	if (!ar->wmi.ops->gen_update_fw_tdls_state)
1230 		return -EOPNOTSUPP;
1231 
1232 	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1233 	if (IS_ERR(skb))
1234 		return PTR_ERR(skb);
1235 
1236 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1237 }
1238 
1239 static inline int
1240 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1241 			    const struct wmi_tdls_peer_update_cmd_arg *arg,
1242 			    const struct wmi_tdls_peer_capab_arg *cap,
1243 			    const struct wmi_channel_arg *chan)
1244 {
1245 	struct sk_buff *skb;
1246 
1247 	if (!ar->wmi.ops->gen_tdls_peer_update)
1248 		return -EOPNOTSUPP;
1249 
1250 	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1251 	if (IS_ERR(skb))
1252 		return PTR_ERR(skb);
1253 
1254 	return ath10k_wmi_cmd_send(ar, skb,
1255 				   ar->wmi.cmd->tdls_peer_update_cmdid);
1256 }
1257 
1258 static inline int
1259 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1260 {
1261 	struct sk_buff *skb;
1262 
1263 	if (!ar->wmi.ops->gen_adaptive_qcs)
1264 		return -EOPNOTSUPP;
1265 
1266 	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1267 	if (IS_ERR(skb))
1268 		return PTR_ERR(skb);
1269 
1270 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1271 }
1272 
1273 #endif
1274