/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

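/* Per-ABI WMI vtable. Each WMI interface variant supported by the driver
 * (e.g. the legacy and TLV interfaces) provides its own implementation:
 * rx() dispatches incoming WMI events, map_svc() translates the firmware
 * service bitmap, the pull_*() ops parse event payloads into ABI-neutral
 * arg structures and the gen_*() ops build command skbs for the firmware.
 */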
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

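	/* Event parsers: unpack a received event skb into the given arg
	 * struct. */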
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);

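	/* Command builders: return an skb framed for the given command, or
	 * an ERR_PTR() on failure. The caller sends it with
	 * ath10k_wmi_cmd_send(). */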
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

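/* The inline wrappers below pick the op for the running firmware ABI,
 * return -EOPNOTSUPP if the op is not implemented, and for command ops hand
 * the generated skb to ath10k_wmi_cmd_send() with the matching command id.
 */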
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
		       struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

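/* Sends a management frame via the WMI management tx command and reports
 * tx status back to mac80211 (see the FIXME below regarding ACK status).
 */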
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

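/* Submits a beacon referenced by DMA address using the nowait send variant;
 * the command skb is freed here only if the send itself fails.
 */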
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

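/* Wake-on-WLAN: enable WoW, configure wakeup events and patterns, and
 * signal host wakeup to the firmware on resume.
 */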
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

#endif