xref: /linux/drivers/net/wireless/ath/ath10k/wmi-ops.h (revision e8d235d4d8fb8957bae5f6ed4521115203a00d8b)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifndef _WMI_OPS_H_
19 #define _WMI_OPS_H_
20 
21 struct ath10k;
22 struct sk_buff;
23 
/* Per-ABI WMI operation table.
 *
 * Abstracts the differences between the firmware WMI interface variants.
 * ->rx and ->map_svc handle incoming event delivery and service-bitmap
 * translation; the ->pull_* ops parse raw event skbs into generic
 * wmi_*_ev_arg structures; the ->gen_* ops build command skbs which the
 * ath10k_wmi_*() inline wrappers below submit via ath10k_wmi_cmd_send().
 *
 * An op pointer left NULL means the ABI does not implement that
 * command/event; the wrappers check for this and return -EOPNOTSUPP.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* event parsers: skb -> generic arg struct */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);

	/* command builders: args -> command skb (or ERR_PTR) */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
};
152 
153 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
154 
/* Dispatch a received WMI event skb to the ABI-specific rx handler.
 * WARNs once and returns -EOPNOTSUPP if no handler is registered.
 */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}
164 
/* Translate the ABI-specific firmware service bitmap @in into the
 * driver's generic service bitmap @out.
 */
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}
175 
/* Parse a WMI scan event from @skb into @arg. */
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}
185 
/* Parse a WMI management-frame rx event from @skb into @arg. */
static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}
195 
/* Parse a WMI channel-info event from @skb into @arg. */
static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}
205 
/* Parse a WMI vdev start-response event from @skb into @arg. */
static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}
215 
/* Parse a WMI peer sta-kickout event from @skb into @arg. */
static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}
225 
/* Parse a WMI software beacon alert (SWBA) event from @skb into @arg. */
static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}
235 
/* Parse a WMI PHY-error event from @skb into @arg. */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
		       struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
}
245 
/* Parse a WMI service-ready event from @skb into @arg. */
static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}
255 
/* Parse a WMI ready event from @skb into @arg. */
static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}
265 
/* Parse a WMI firmware-stats event from @skb into @stats. */
static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}
275 
/* Transmit management frame @msdu via the WMI mgmt-tx command and then
 * report a synthesized "ACKed" tx status for it back to mac80211.
 */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
301 
/* Build and send a WMI pdev set-regdomain command. */
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
320 
/* Build and send a WMI pdev suspend command. */
static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
335 
/* Build and send a WMI pdev resume command. */
static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
350 
/* Build and send a WMI pdev set-param command (@id = @value). */
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
365 
/* Build and send the WMI init command. */
static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}
380 
/* Build and send a WMI start-scan command. */
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
396 
/* Build and send a WMI stop-scan command. */
static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
411 
/* Build and send a WMI vdev-create command. */
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
429 
/* Build and send a WMI vdev-delete command. */
static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
444 
/* Build and send a WMI vdev start request (restart=false). */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}
461 
/* Build and send a WMI vdev restart request (same builder as start,
 * restart=true, sent with the restart cmdid).
 */
static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}
478 
/* Build and send a WMI vdev-stop command. */
static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
493 
/* Build and send a WMI vdev-up command (@aid/@bssid of the association). */
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
508 
/* Build and send a WMI vdev-down command. */
static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
523 
/* Build and send a WMI vdev set-param command. */
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
540 
/* Build and send a WMI vdev install-key command. */
static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
557 
558 static inline int
559 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
560 			      const struct wmi_vdev_spectral_conf_arg *arg)
561 {
562 	struct sk_buff *skb;
563 	u32 cmd_id;
564 
565 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
566 	if (IS_ERR(skb))
567 		return PTR_ERR(skb);
568 
569 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
570 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
571 }
572 
573 static inline int
574 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
575 				u32 enable)
576 {
577 	struct sk_buff *skb;
578 	u32 cmd_id;
579 
580 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
581 						    enable);
582 	if (IS_ERR(skb))
583 		return PTR_ERR(skb);
584 
585 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
586 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
587 }
588 
/* Build and send a WMI STA UAPSD auto-trigger command for @peer_addr. */
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
609 
610 static inline int
611 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
612 			 const struct wmi_wmm_params_all_arg *arg)
613 {
614 	struct sk_buff *skb;
615 	u32 cmd_id;
616 
617 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
618 	if (IS_ERR(skb))
619 		return PTR_ERR(skb);
620 
621 	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
622 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
623 }
624 
/* Build and send a WMI peer-create command. */
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
640 
/* Build and send a WMI peer-delete command. */
static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
656 
/* Build and send a WMI peer flush-TIDs command for @tid_bitmap. */
static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
672 
/* Build and send a WMI peer set-param command. */
static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
689 
/* Build and send a WMI STA powersave-mode command. */
static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}
706 
/* Build and send a WMI STA powersave-param command. */
static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}
723 
/* Build and send a WMI AP powersave peer-param command for @mac. */
static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
740 
/* Build and send a WMI scan channel-list command. */
static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
756 
/* Build and send a WMI peer assoc-complete command. */
static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
772 
/* Submit a beacon by DMA reference (@bcn_paddr) using the non-waiting
 * command path.  The command skb is freed here explicitly if the nowait
 * send fails.
 */
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
799 
/* Build and send a WMI pdev WMM-parameters command. */
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
816 
/* Build and send a WMI request-stats command for @stats_mask. */
static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
831 
/* Build and send a WMI force-firmware-hang command (debug facility). */
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
847 
/* Build and send a WMI debug-log configuration command. */
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
862 
/* Build and send a WMI packet-log enable command with @filter. */
static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}
877 
/* Build and send a WMI packet-log disable command. */
static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}
893 
/* Build and send a WMI pdev quiet-mode command. */
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}
911 
/* Build and send a WMI pdev get-temperature command. */
static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}
927 
/* Build and send a WMI ADDBA clear-response command for peer @mac. */
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}
943 
/* Build and send a WMI ADDBA send command. */
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}
960 
/* Build and send a WMI ADDBA set-response command. */
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}
977 
/* Build and send a WMI DELBA send command. */
static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}
995 
/* Build and send a WMI beacon-template command. */
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}
1014 
/* Build and send a WMI probe-response template command. */
static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}
1029 
/* Build and send a WMI P2P GO beacon-IE command. */
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
1044 
/* Build and send a WMI STA keepalive command. */
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
1062 
1063 #endif
1064