xref: /linux/drivers/net/wireless/ath/ath10k/wmi-ops.h (revision 95298d63c67673c654c08952672d016212b26054)
1 /* SPDX-License-Identifier: ISC */
2 /*
3  * Copyright (c) 2005-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
6  */
7 
8 #ifndef _WMI_OPS_H_
9 #define _WMI_OPS_H_
10 
11 struct ath10k;
12 struct sk_buff;
13 
14 struct wmi_ops {
15 	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
16 	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
17 	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
18 
19 	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
20 			 struct wmi_scan_ev_arg *arg);
21 	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
22 			    struct wmi_mgmt_rx_ev_arg *arg);
23 	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
24 				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
25 	int (*pull_mgmt_tx_bundle_compl)(
26 				struct ath10k *ar, struct sk_buff *skb,
27 				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
28 	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
29 			    struct wmi_ch_info_ev_arg *arg);
30 	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
31 			       struct wmi_vdev_start_ev_arg *arg);
32 	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
33 			      struct wmi_peer_kick_ev_arg *arg);
34 	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
35 			 struct wmi_swba_ev_arg *arg);
36 	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
37 			       struct wmi_phyerr_hdr_arg *arg);
38 	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
39 			   int left_len, struct wmi_phyerr_ev_arg *arg);
40 	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
41 			    struct wmi_svc_rdy_ev_arg *arg);
42 	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 			struct wmi_rdy_ev_arg *arg);
44 	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
45 			     struct ath10k_fw_stats *stats);
46 	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
47 			    struct wmi_roam_ev_arg *arg);
48 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
49 			      struct wmi_wow_ev_arg *arg);
50 	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
51 			    struct wmi_echo_ev_arg *arg);
52 	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
53 				  struct wmi_dfs_status_ev_arg *arg);
54 	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
55 			      struct wmi_svc_avail_ev_arg *arg);
56 
57 	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
58 
59 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
60 	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
61 	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
62 						     const u8 macaddr[ETH_ALEN]);
63 	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
64 					   u16 rd5g, u16 ctl2g, u16 ctl5g,
65 					   enum wmi_dfs_region dfs_reg);
66 	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
67 					      u32 value);
68 	struct sk_buff *(*gen_init)(struct ath10k *ar);
69 	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
70 					  const struct wmi_start_scan_arg *arg);
71 	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
72 					 const struct wmi_stop_scan_arg *arg);
73 	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
74 					   enum wmi_vdev_type type,
75 					   enum wmi_vdev_subtype subtype,
76 					   const u8 macaddr[ETH_ALEN]);
77 	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
78 	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
79 					  const struct wmi_vdev_start_request_arg *arg,
80 					  bool restart);
81 	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
82 	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
83 				       const u8 *bssid);
84 	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
85 	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
86 					      u32 param_id, u32 param_value);
87 	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
88 						const struct wmi_vdev_install_key_arg *arg);
89 	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
90 						  const struct wmi_vdev_spectral_conf_arg *arg);
91 	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
92 						    u32 trigger, u32 enable);
93 	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
94 					     const struct wmi_wmm_params_all_arg *arg);
95 	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
96 					   const u8 peer_addr[ETH_ALEN],
97 					   enum wmi_peer_type peer_type);
98 	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
99 					   const u8 peer_addr[ETH_ALEN]);
100 	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
101 					  const u8 peer_addr[ETH_ALEN],
102 					  u32 tid_bitmap);
103 	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
104 					      const u8 *peer_addr,
105 					      enum wmi_peer_param param_id,
106 					      u32 param_value);
107 	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
108 					  const struct wmi_peer_assoc_complete_arg *arg);
109 	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
110 					  enum wmi_sta_ps_mode psmode);
111 	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
112 					  enum wmi_sta_powersave_param param_id,
113 					  u32 value);
114 	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
115 					 const u8 *mac,
116 					 enum wmi_ap_ps_peer_param param_id,
117 					 u32 value);
118 	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
119 					      const struct wmi_scan_chan_list_arg *arg);
120 	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
121 						 u32 prob_req_oui);
122 	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
123 					  const void *bcn, size_t bcn_len,
124 					  u32 bcn_paddr, bool dtim_zero,
125 					  bool deliver_cab);
126 	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
127 					    const struct wmi_wmm_params_all_arg *arg);
128 	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
129 	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
130 						       u32 vdev_id,
131 						       enum
132 						       wmi_peer_stats_info_request_type
133 						       type,
134 						       u8 *addr,
135 						       u32 reset);
136 	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
137 					     enum wmi_force_fw_hang_type type,
138 					     u32 delay_ms);
139 	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
140 	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
141 					    struct sk_buff *skb,
142 					    dma_addr_t paddr);
143 	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
144 	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
145 					  u32 log_level);
146 	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
147 	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
148 	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
149 						   u32 period, u32 duration,
150 						   u32 next_offset,
151 						   u32 enabled);
152 	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
153 	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
154 						const u8 *mac);
155 	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
156 					  const u8 *mac, u32 tid, u32 buf_size);
157 	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
158 					      const u8 *mac, u32 tid,
159 					      u32 status);
160 	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
161 					  const u8 *mac, u32 tid, u32 initiator,
162 					  u32 reason);
163 	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
164 					u32 tim_ie_offset, struct sk_buff *bcn,
165 					u32 prb_caps, u32 prb_erp,
166 					void *prb_ies, size_t prb_ies_len);
167 	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
168 					struct sk_buff *bcn);
169 	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
170 					     const u8 *p2p_ie);
171 	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
172 					      const u8 peer_addr[ETH_ALEN],
173 					      const struct wmi_sta_uapsd_auto_trig_arg *args,
174 					      u32 num_ac);
175 	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
176 					     const struct wmi_sta_keepalive_arg *arg);
177 	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
178 	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
179 						    enum wmi_wow_wakeup_event event,
180 						    u32 enable);
181 	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
182 	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
183 					       u32 pattern_id,
184 					       const u8 *pattern,
185 					       const u8 *mask,
186 					       int pattern_len,
187 					       int pattern_offset);
188 	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
189 					       u32 pattern_id);
190 	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
191 						    u32 vdev_id,
192 						    enum wmi_tdls_state state);
193 	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
194 						const struct wmi_tdls_peer_update_cmd_arg *arg,
195 						const struct wmi_tdls_peer_capab_arg *cap,
196 						const struct wmi_channel_arg *chan);
197 	struct sk_buff *(*gen_radar_found)
198 			(struct ath10k *ar,
199 			 const struct ath10k_radar_found_info *arg);
200 	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
201 	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
202 						   u32 param);
203 	void (*fw_stats_fill)(struct ath10k *ar,
204 			      struct ath10k_fw_stats *fw_stats,
205 			      char *buf);
206 	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
207 							u8 enable,
208 							u32 detect_level,
209 							u32 detect_margin);
210 	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
211 					       enum wmi_host_platform_type type,
212 					       u32 fw_feature_bitmap);
213 	int (*get_vdev_subtype)(struct ath10k *ar,
214 				enum wmi_vdev_subtype subtype);
215 	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
216 					      u32 vdev_id,
217 					      struct wmi_pno_scan_req *pno_scan);
218 	struct sk_buff *(*gen_pdev_bss_chan_info_req)
219 					(struct ath10k *ar,
220 					 enum wmi_bss_survey_req_type type);
221 	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
222 	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
223 							u32 param);
224 	struct sk_buff *(*gen_bb_timing)
225 			(struct ath10k *ar,
226 			 const struct wmi_bb_timing_cfg_arg *arg);
227 
228 };
229 
230 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
231 
232 static inline int
233 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
234 {
235 	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
236 		return -EOPNOTSUPP;
237 
238 	ar->wmi.ops->rx(ar, skb);
239 	return 0;
240 }
241 
242 static inline int
243 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
244 		   size_t len)
245 {
246 	if (!ar->wmi.ops->map_svc)
247 		return -EOPNOTSUPP;
248 
249 	ar->wmi.ops->map_svc(in, out, len);
250 	return 0;
251 }
252 
253 static inline int
254 ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
255 		       size_t len)
256 {
257 	if (!ar->wmi.ops->map_svc_ext)
258 		return -EOPNOTSUPP;
259 
260 	ar->wmi.ops->map_svc_ext(in, out, len);
261 	return 0;
262 }
263 
264 static inline int
265 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
266 		     struct wmi_scan_ev_arg *arg)
267 {
268 	if (!ar->wmi.ops->pull_scan)
269 		return -EOPNOTSUPP;
270 
271 	return ar->wmi.ops->pull_scan(ar, skb, arg);
272 }
273 
274 static inline int
275 ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
276 			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
277 {
278 	if (!ar->wmi.ops->pull_mgmt_tx_compl)
279 		return -EOPNOTSUPP;
280 
281 	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
282 }
283 
284 static inline int
285 ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
286 				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
287 {
288 	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
289 		return -EOPNOTSUPP;
290 
291 	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
292 }
293 
294 static inline int
295 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
296 			struct wmi_mgmt_rx_ev_arg *arg)
297 {
298 	if (!ar->wmi.ops->pull_mgmt_rx)
299 		return -EOPNOTSUPP;
300 
301 	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
302 }
303 
304 static inline int
305 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
306 			struct wmi_ch_info_ev_arg *arg)
307 {
308 	if (!ar->wmi.ops->pull_ch_info)
309 		return -EOPNOTSUPP;
310 
311 	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
312 }
313 
314 static inline int
315 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
316 			   struct wmi_vdev_start_ev_arg *arg)
317 {
318 	if (!ar->wmi.ops->pull_vdev_start)
319 		return -EOPNOTSUPP;
320 
321 	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
322 }
323 
324 static inline int
325 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
326 			  struct wmi_peer_kick_ev_arg *arg)
327 {
328 	if (!ar->wmi.ops->pull_peer_kick)
329 		return -EOPNOTSUPP;
330 
331 	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
332 }
333 
334 static inline int
335 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
336 		     struct wmi_swba_ev_arg *arg)
337 {
338 	if (!ar->wmi.ops->pull_swba)
339 		return -EOPNOTSUPP;
340 
341 	return ar->wmi.ops->pull_swba(ar, skb, arg);
342 }
343 
344 static inline int
345 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
346 			   struct wmi_phyerr_hdr_arg *arg)
347 {
348 	if (!ar->wmi.ops->pull_phyerr_hdr)
349 		return -EOPNOTSUPP;
350 
351 	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
352 }
353 
354 static inline int
355 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
356 		       int left_len, struct wmi_phyerr_ev_arg *arg)
357 {
358 	if (!ar->wmi.ops->pull_phyerr)
359 		return -EOPNOTSUPP;
360 
361 	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
362 }
363 
364 static inline int
365 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
366 			struct wmi_svc_rdy_ev_arg *arg)
367 {
368 	if (!ar->wmi.ops->pull_svc_rdy)
369 		return -EOPNOTSUPP;
370 
371 	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
372 }
373 
374 static inline int
375 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
376 		    struct wmi_rdy_ev_arg *arg)
377 {
378 	if (!ar->wmi.ops->pull_rdy)
379 		return -EOPNOTSUPP;
380 
381 	return ar->wmi.ops->pull_rdy(ar, skb, arg);
382 }
383 
384 static inline int
385 ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
386 			  struct wmi_svc_avail_ev_arg *arg)
387 {
388 	if (!ar->wmi.ops->pull_svc_avail)
389 		return -EOPNOTSUPP;
390 	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
391 }
392 
393 static inline int
394 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
395 			 struct ath10k_fw_stats *stats)
396 {
397 	if (!ar->wmi.ops->pull_fw_stats)
398 		return -EOPNOTSUPP;
399 
400 	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
401 }
402 
403 static inline int
404 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
405 			struct wmi_roam_ev_arg *arg)
406 {
407 	if (!ar->wmi.ops->pull_roam_ev)
408 		return -EOPNOTSUPP;
409 
410 	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
411 }
412 
413 static inline int
414 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
415 			  struct wmi_wow_ev_arg *arg)
416 {
417 	if (!ar->wmi.ops->pull_wow_event)
418 		return -EOPNOTSUPP;
419 
420 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
421 }
422 
423 static inline int
424 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
425 			struct wmi_echo_ev_arg *arg)
426 {
427 	if (!ar->wmi.ops->pull_echo_ev)
428 		return -EOPNOTSUPP;
429 
430 	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
431 }
432 
433 static inline int
434 ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
435 			   struct wmi_dfs_status_ev_arg *arg)
436 {
437 	if (!ar->wmi.ops->pull_dfs_status_ev)
438 		return -EOPNOTSUPP;
439 
440 	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
441 }
442 
443 static inline enum wmi_txbf_conf
444 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
445 {
446 	if (!ar->wmi.ops->get_txbf_conf_scheme)
447 		return WMI_TXBF_CONF_UNSUPPORTED;
448 
449 	return ar->wmi.ops->get_txbf_conf_scheme(ar);
450 }
451 
452 static inline int
453 ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
454 {
455 	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
456 		return -EOPNOTSUPP;
457 
458 	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
459 }
460 
461 static inline int
462 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
463 			dma_addr_t paddr)
464 {
465 	struct sk_buff *skb;
466 	int ret;
467 
468 	if (!ar->wmi.ops->gen_mgmt_tx_send)
469 		return -EOPNOTSUPP;
470 
471 	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
472 	if (IS_ERR(skb))
473 		return PTR_ERR(skb);
474 
475 	ret = ath10k_wmi_cmd_send(ar, skb,
476 				  ar->wmi.cmd->mgmt_tx_send_cmdid);
477 	if (ret)
478 		return ret;
479 
480 	return 0;
481 }
482 
483 static inline int
484 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
485 {
486 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
487 	struct sk_buff *skb;
488 	int ret;
489 
490 	if (!ar->wmi.ops->gen_mgmt_tx)
491 		return -EOPNOTSUPP;
492 
493 	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
494 	if (IS_ERR(skb))
495 		return PTR_ERR(skb);
496 
497 	ret = ath10k_wmi_cmd_send(ar, skb,
498 				  ar->wmi.cmd->mgmt_tx_cmdid);
499 	if (ret)
500 		return ret;
501 
502 	/* FIXME There's no ACK event for Management Tx. This probably
503 	 * shouldn't be called here either.
504 	 */
505 	info->flags |= IEEE80211_TX_STAT_ACK;
506 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
507 
508 	return 0;
509 }
510 
511 static inline int
512 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
513 			      u16 ctl2g, u16 ctl5g,
514 			      enum wmi_dfs_region dfs_reg)
515 {
516 	struct sk_buff *skb;
517 
518 	if (!ar->wmi.ops->gen_pdev_set_rd)
519 		return -EOPNOTSUPP;
520 
521 	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
522 					   dfs_reg);
523 	if (IS_ERR(skb))
524 		return PTR_ERR(skb);
525 
526 	return ath10k_wmi_cmd_send(ar, skb,
527 				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
528 }
529 
530 static inline int
531 ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
532 {
533 	struct sk_buff *skb;
534 
535 	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
536 		return -EOPNOTSUPP;
537 
538 	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
539 	if (IS_ERR(skb))
540 		return PTR_ERR(skb);
541 
542 	return ath10k_wmi_cmd_send(ar, skb,
543 				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
544 }
545 
546 static inline int
547 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
548 {
549 	struct sk_buff *skb;
550 
551 	if (!ar->wmi.ops->gen_pdev_suspend)
552 		return -EOPNOTSUPP;
553 
554 	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
555 	if (IS_ERR(skb))
556 		return PTR_ERR(skb);
557 
558 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
559 }
560 
561 static inline int
562 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
563 {
564 	struct sk_buff *skb;
565 
566 	if (!ar->wmi.ops->gen_pdev_resume)
567 		return -EOPNOTSUPP;
568 
569 	skb = ar->wmi.ops->gen_pdev_resume(ar);
570 	if (IS_ERR(skb))
571 		return PTR_ERR(skb);
572 
573 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
574 }
575 
576 static inline int
577 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
578 {
579 	struct sk_buff *skb;
580 
581 	if (!ar->wmi.ops->gen_pdev_set_param)
582 		return -EOPNOTSUPP;
583 
584 	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
585 	if (IS_ERR(skb))
586 		return PTR_ERR(skb);
587 
588 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
589 }
590 
591 static inline int
592 ath10k_wmi_cmd_init(struct ath10k *ar)
593 {
594 	struct sk_buff *skb;
595 
596 	if (!ar->wmi.ops->gen_init)
597 		return -EOPNOTSUPP;
598 
599 	skb = ar->wmi.ops->gen_init(ar);
600 	if (IS_ERR(skb))
601 		return PTR_ERR(skb);
602 
603 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
604 }
605 
606 static inline int
607 ath10k_wmi_start_scan(struct ath10k *ar,
608 		      const struct wmi_start_scan_arg *arg)
609 {
610 	struct sk_buff *skb;
611 
612 	if (!ar->wmi.ops->gen_start_scan)
613 		return -EOPNOTSUPP;
614 
615 	skb = ar->wmi.ops->gen_start_scan(ar, arg);
616 	if (IS_ERR(skb))
617 		return PTR_ERR(skb);
618 
619 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
620 }
621 
622 static inline int
623 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
624 {
625 	struct sk_buff *skb;
626 
627 	if (!ar->wmi.ops->gen_stop_scan)
628 		return -EOPNOTSUPP;
629 
630 	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
631 	if (IS_ERR(skb))
632 		return PTR_ERR(skb);
633 
634 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
635 }
636 
637 static inline int
638 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
639 		       enum wmi_vdev_type type,
640 		       enum wmi_vdev_subtype subtype,
641 		       const u8 macaddr[ETH_ALEN])
642 {
643 	struct sk_buff *skb;
644 
645 	if (!ar->wmi.ops->gen_vdev_create)
646 		return -EOPNOTSUPP;
647 
648 	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
649 	if (IS_ERR(skb))
650 		return PTR_ERR(skb);
651 
652 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
653 }
654 
655 static inline int
656 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
657 {
658 	struct sk_buff *skb;
659 
660 	if (!ar->wmi.ops->gen_vdev_delete)
661 		return -EOPNOTSUPP;
662 
663 	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
664 	if (IS_ERR(skb))
665 		return PTR_ERR(skb);
666 
667 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
668 }
669 
670 static inline int
671 ath10k_wmi_vdev_start(struct ath10k *ar,
672 		      const struct wmi_vdev_start_request_arg *arg)
673 {
674 	struct sk_buff *skb;
675 
676 	if (!ar->wmi.ops->gen_vdev_start)
677 		return -EOPNOTSUPP;
678 
679 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
680 	if (IS_ERR(skb))
681 		return PTR_ERR(skb);
682 
683 	return ath10k_wmi_cmd_send(ar, skb,
684 				   ar->wmi.cmd->vdev_start_request_cmdid);
685 }
686 
687 static inline int
688 ath10k_wmi_vdev_restart(struct ath10k *ar,
689 			const struct wmi_vdev_start_request_arg *arg)
690 {
691 	struct sk_buff *skb;
692 
693 	if (!ar->wmi.ops->gen_vdev_start)
694 		return -EOPNOTSUPP;
695 
696 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
697 	if (IS_ERR(skb))
698 		return PTR_ERR(skb);
699 
700 	return ath10k_wmi_cmd_send(ar, skb,
701 				   ar->wmi.cmd->vdev_restart_request_cmdid);
702 }
703 
704 static inline int
705 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
706 {
707 	struct sk_buff *skb;
708 
709 	if (!ar->wmi.ops->gen_vdev_stop)
710 		return -EOPNOTSUPP;
711 
712 	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
713 	if (IS_ERR(skb))
714 		return PTR_ERR(skb);
715 
716 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
717 }
718 
719 static inline int
720 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
721 {
722 	struct sk_buff *skb;
723 
724 	if (!ar->wmi.ops->gen_vdev_up)
725 		return -EOPNOTSUPP;
726 
727 	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
728 	if (IS_ERR(skb))
729 		return PTR_ERR(skb);
730 
731 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
732 }
733 
734 static inline int
735 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
736 {
737 	struct sk_buff *skb;
738 
739 	if (!ar->wmi.ops->gen_vdev_down)
740 		return -EOPNOTSUPP;
741 
742 	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
743 	if (IS_ERR(skb))
744 		return PTR_ERR(skb);
745 
746 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
747 }
748 
749 static inline int
750 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
751 			  u32 param_value)
752 {
753 	struct sk_buff *skb;
754 
755 	if (!ar->wmi.ops->gen_vdev_set_param)
756 		return -EOPNOTSUPP;
757 
758 	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
759 					      param_value);
760 	if (IS_ERR(skb))
761 		return PTR_ERR(skb);
762 
763 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
764 }
765 
766 static inline int
767 ath10k_wmi_vdev_install_key(struct ath10k *ar,
768 			    const struct wmi_vdev_install_key_arg *arg)
769 {
770 	struct sk_buff *skb;
771 
772 	if (!ar->wmi.ops->gen_vdev_install_key)
773 		return -EOPNOTSUPP;
774 
775 	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
776 	if (IS_ERR(skb))
777 		return PTR_ERR(skb);
778 
779 	return ath10k_wmi_cmd_send(ar, skb,
780 				   ar->wmi.cmd->vdev_install_key_cmdid);
781 }
782 
783 static inline int
784 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
785 			      const struct wmi_vdev_spectral_conf_arg *arg)
786 {
787 	struct sk_buff *skb;
788 	u32 cmd_id;
789 
790 	if (!ar->wmi.ops->gen_vdev_spectral_conf)
791 		return -EOPNOTSUPP;
792 
793 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
794 	if (IS_ERR(skb))
795 		return PTR_ERR(skb);
796 
797 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
798 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
799 }
800 
801 static inline int
802 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
803 				u32 enable)
804 {
805 	struct sk_buff *skb;
806 	u32 cmd_id;
807 
808 	if (!ar->wmi.ops->gen_vdev_spectral_enable)
809 		return -EOPNOTSUPP;
810 
811 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
812 						    enable);
813 	if (IS_ERR(skb))
814 		return PTR_ERR(skb);
815 
816 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
817 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
818 }
819 
820 static inline int
821 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
822 			  const u8 peer_addr[ETH_ALEN],
823 			  const struct wmi_sta_uapsd_auto_trig_arg *args,
824 			  u32 num_ac)
825 {
826 	struct sk_buff *skb;
827 	u32 cmd_id;
828 
829 	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
830 		return -EOPNOTSUPP;
831 
832 	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
833 					      num_ac);
834 	if (IS_ERR(skb))
835 		return PTR_ERR(skb);
836 
837 	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
838 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
839 }
840 
841 static inline int
842 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
843 			 const struct wmi_wmm_params_all_arg *arg)
844 {
845 	struct sk_buff *skb;
846 	u32 cmd_id;
847 
848 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
849 	if (IS_ERR(skb))
850 		return PTR_ERR(skb);
851 
852 	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
853 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
854 }
855 
856 static inline int
857 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
858 		       const u8 peer_addr[ETH_ALEN],
859 		       enum wmi_peer_type peer_type)
860 {
861 	struct sk_buff *skb;
862 
863 	if (!ar->wmi.ops->gen_peer_create)
864 		return -EOPNOTSUPP;
865 
866 	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
867 	if (IS_ERR(skb))
868 		return PTR_ERR(skb);
869 
870 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
871 }
872 
873 static inline int
874 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
875 		       const u8 peer_addr[ETH_ALEN])
876 {
877 	struct sk_buff *skb;
878 
879 	if (!ar->wmi.ops->gen_peer_delete)
880 		return -EOPNOTSUPP;
881 
882 	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
883 	if (IS_ERR(skb))
884 		return PTR_ERR(skb);
885 
886 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
887 }
888 
889 static inline int
890 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
891 		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
892 {
893 	struct sk_buff *skb;
894 
895 	if (!ar->wmi.ops->gen_peer_flush)
896 		return -EOPNOTSUPP;
897 
898 	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
899 	if (IS_ERR(skb))
900 		return PTR_ERR(skb);
901 
902 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
903 }
904 
905 static inline int
906 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
907 			  enum wmi_peer_param param_id, u32 param_value)
908 {
909 	struct sk_buff *skb;
910 
911 	if (!ar->wmi.ops->gen_peer_set_param)
912 		return -EOPNOTSUPP;
913 
914 	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
915 					      param_value);
916 	if (IS_ERR(skb))
917 		return PTR_ERR(skb);
918 
919 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
920 }
921 
922 static inline int
923 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
924 		      enum wmi_sta_ps_mode psmode)
925 {
926 	struct sk_buff *skb;
927 
928 	if (!ar->wmi.ops->gen_set_psmode)
929 		return -EOPNOTSUPP;
930 
931 	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
932 	if (IS_ERR(skb))
933 		return PTR_ERR(skb);
934 
935 	return ath10k_wmi_cmd_send(ar, skb,
936 				   ar->wmi.cmd->sta_powersave_mode_cmdid);
937 }
938 
939 static inline int
940 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
941 			    enum wmi_sta_powersave_param param_id, u32 value)
942 {
943 	struct sk_buff *skb;
944 
945 	if (!ar->wmi.ops->gen_set_sta_ps)
946 		return -EOPNOTSUPP;
947 
948 	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
949 	if (IS_ERR(skb))
950 		return PTR_ERR(skb);
951 
952 	return ath10k_wmi_cmd_send(ar, skb,
953 				   ar->wmi.cmd->sta_powersave_param_cmdid);
954 }
955 
956 static inline int
957 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
958 			   enum wmi_ap_ps_peer_param param_id, u32 value)
959 {
960 	struct sk_buff *skb;
961 
962 	if (!ar->wmi.ops->gen_set_ap_ps)
963 		return -EOPNOTSUPP;
964 
965 	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
966 	if (IS_ERR(skb))
967 		return PTR_ERR(skb);
968 
969 	return ath10k_wmi_cmd_send(ar, skb,
970 				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
971 }
972 
973 static inline int
974 ath10k_wmi_scan_chan_list(struct ath10k *ar,
975 			  const struct wmi_scan_chan_list_arg *arg)
976 {
977 	struct sk_buff *skb;
978 
979 	if (!ar->wmi.ops->gen_scan_chan_list)
980 		return -EOPNOTSUPP;
981 
982 	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
983 	if (IS_ERR(skb))
984 		return PTR_ERR(skb);
985 
986 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
987 }
988 
989 static inline int
990 ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
991 {
992 	struct sk_buff *skb;
993 	u32 prob_req_oui;
994 
995 	prob_req_oui = (((u32)mac_addr[0]) << 16) |
996 		       (((u32)mac_addr[1]) << 8) | mac_addr[2];
997 
998 	if (!ar->wmi.ops->gen_scan_prob_req_oui)
999 		return -EOPNOTSUPP;
1000 
1001 	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
1002 	if (IS_ERR(skb))
1003 		return PTR_ERR(skb);
1004 
1005 	return ath10k_wmi_cmd_send(ar, skb,
1006 			ar->wmi.cmd->scan_prob_req_oui_cmdid);
1007 }
1008 
1009 static inline int
1010 ath10k_wmi_peer_assoc(struct ath10k *ar,
1011 		      const struct wmi_peer_assoc_complete_arg *arg)
1012 {
1013 	struct sk_buff *skb;
1014 
1015 	if (!ar->wmi.ops->gen_peer_assoc)
1016 		return -EOPNOTSUPP;
1017 
1018 	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
1019 	if (IS_ERR(skb))
1020 		return PTR_ERR(skb);
1021 
1022 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
1023 }
1024 
1025 static inline int
1026 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
1027 				  const void *bcn, size_t bcn_len,
1028 				  u32 bcn_paddr, bool dtim_zero,
1029 				  bool deliver_cab)
1030 {
1031 	struct sk_buff *skb;
1032 	int ret;
1033 
1034 	if (!ar->wmi.ops->gen_beacon_dma)
1035 		return -EOPNOTSUPP;
1036 
1037 	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
1038 					  dtim_zero, deliver_cab);
1039 	if (IS_ERR(skb))
1040 		return PTR_ERR(skb);
1041 
1042 	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
1043 					 ar->wmi.cmd->pdev_send_bcn_cmdid);
1044 	if (ret) {
1045 		dev_kfree_skb(skb);
1046 		return ret;
1047 	}
1048 
1049 	return 0;
1050 }
1051 
1052 static inline int
1053 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
1054 			       const struct wmi_wmm_params_all_arg *arg)
1055 {
1056 	struct sk_buff *skb;
1057 
1058 	if (!ar->wmi.ops->gen_pdev_set_wmm)
1059 		return -EOPNOTSUPP;
1060 
1061 	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
1062 	if (IS_ERR(skb))
1063 		return PTR_ERR(skb);
1064 
1065 	return ath10k_wmi_cmd_send(ar, skb,
1066 				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
1067 }
1068 
1069 static inline int
1070 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
1071 {
1072 	struct sk_buff *skb;
1073 
1074 	if (!ar->wmi.ops->gen_request_stats)
1075 		return -EOPNOTSUPP;
1076 
1077 	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
1078 	if (IS_ERR(skb))
1079 		return PTR_ERR(skb);
1080 
1081 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
1082 }
1083 
1084 static inline int
1085 ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
1086 				   u32 vdev_id,
1087 				   enum wmi_peer_stats_info_request_type type,
1088 				   u8 *addr,
1089 				   u32 reset)
1090 {
1091 	struct sk_buff *skb;
1092 
1093 	if (!ar->wmi.ops->gen_request_peer_stats_info)
1094 		return -EOPNOTSUPP;
1095 
1096 	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
1097 						       vdev_id,
1098 						       type,
1099 						       addr,
1100 						       reset);
1101 	if (IS_ERR(skb))
1102 		return PTR_ERR(skb);
1103 
1104 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
1105 }
1106 
1107 static inline int
1108 ath10k_wmi_force_fw_hang(struct ath10k *ar,
1109 			 enum wmi_force_fw_hang_type type, u32 delay_ms)
1110 {
1111 	struct sk_buff *skb;
1112 
1113 	if (!ar->wmi.ops->gen_force_fw_hang)
1114 		return -EOPNOTSUPP;
1115 
1116 	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
1117 	if (IS_ERR(skb))
1118 		return PTR_ERR(skb);
1119 
1120 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
1121 }
1122 
1123 static inline int
1124 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
1125 {
1126 	struct sk_buff *skb;
1127 
1128 	if (!ar->wmi.ops->gen_dbglog_cfg)
1129 		return -EOPNOTSUPP;
1130 
1131 	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
1132 	if (IS_ERR(skb))
1133 		return PTR_ERR(skb);
1134 
1135 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
1136 }
1137 
1138 static inline int
1139 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1140 {
1141 	struct sk_buff *skb;
1142 
1143 	if (!ar->wmi.ops->gen_pktlog_enable)
1144 		return -EOPNOTSUPP;
1145 
1146 	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1147 	if (IS_ERR(skb))
1148 		return PTR_ERR(skb);
1149 
1150 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1151 }
1152 
1153 static inline int
1154 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1155 {
1156 	struct sk_buff *skb;
1157 
1158 	if (!ar->wmi.ops->gen_pktlog_disable)
1159 		return -EOPNOTSUPP;
1160 
1161 	skb = ar->wmi.ops->gen_pktlog_disable(ar);
1162 	if (IS_ERR(skb))
1163 		return PTR_ERR(skb);
1164 
1165 	return ath10k_wmi_cmd_send(ar, skb,
1166 				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1167 }
1168 
1169 static inline int
1170 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1171 			       u32 next_offset, u32 enabled)
1172 {
1173 	struct sk_buff *skb;
1174 
1175 	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1176 		return -EOPNOTSUPP;
1177 
1178 	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1179 						   next_offset, enabled);
1180 	if (IS_ERR(skb))
1181 		return PTR_ERR(skb);
1182 
1183 	return ath10k_wmi_cmd_send(ar, skb,
1184 				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1185 }
1186 
1187 static inline int
1188 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1189 {
1190 	struct sk_buff *skb;
1191 
1192 	if (!ar->wmi.ops->gen_pdev_get_temperature)
1193 		return -EOPNOTSUPP;
1194 
1195 	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1196 	if (IS_ERR(skb))
1197 		return PTR_ERR(skb);
1198 
1199 	return ath10k_wmi_cmd_send(ar, skb,
1200 				   ar->wmi.cmd->pdev_get_temperature_cmdid);
1201 }
1202 
1203 static inline int
1204 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1205 {
1206 	struct sk_buff *skb;
1207 
1208 	if (!ar->wmi.ops->gen_addba_clear_resp)
1209 		return -EOPNOTSUPP;
1210 
1211 	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1212 	if (IS_ERR(skb))
1213 		return PTR_ERR(skb);
1214 
1215 	return ath10k_wmi_cmd_send(ar, skb,
1216 				   ar->wmi.cmd->addba_clear_resp_cmdid);
1217 }
1218 
1219 static inline int
1220 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1221 		      u32 tid, u32 buf_size)
1222 {
1223 	struct sk_buff *skb;
1224 
1225 	if (!ar->wmi.ops->gen_addba_send)
1226 		return -EOPNOTSUPP;
1227 
1228 	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1229 	if (IS_ERR(skb))
1230 		return PTR_ERR(skb);
1231 
1232 	return ath10k_wmi_cmd_send(ar, skb,
1233 				   ar->wmi.cmd->addba_send_cmdid);
1234 }
1235 
1236 static inline int
1237 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1238 			  u32 tid, u32 status)
1239 {
1240 	struct sk_buff *skb;
1241 
1242 	if (!ar->wmi.ops->gen_addba_set_resp)
1243 		return -EOPNOTSUPP;
1244 
1245 	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1246 	if (IS_ERR(skb))
1247 		return PTR_ERR(skb);
1248 
1249 	return ath10k_wmi_cmd_send(ar, skb,
1250 				   ar->wmi.cmd->addba_set_resp_cmdid);
1251 }
1252 
1253 static inline int
1254 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1255 		      u32 tid, u32 initiator, u32 reason)
1256 {
1257 	struct sk_buff *skb;
1258 
1259 	if (!ar->wmi.ops->gen_delba_send)
1260 		return -EOPNOTSUPP;
1261 
1262 	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1263 					  reason);
1264 	if (IS_ERR(skb))
1265 		return PTR_ERR(skb);
1266 
1267 	return ath10k_wmi_cmd_send(ar, skb,
1268 				   ar->wmi.cmd->delba_send_cmdid);
1269 }
1270 
1271 static inline int
1272 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1273 		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1274 		    void *prb_ies, size_t prb_ies_len)
1275 {
1276 	struct sk_buff *skb;
1277 
1278 	if (!ar->wmi.ops->gen_bcn_tmpl)
1279 		return -EOPNOTSUPP;
1280 
1281 	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1282 					prb_caps, prb_erp, prb_ies,
1283 					prb_ies_len);
1284 	if (IS_ERR(skb))
1285 		return PTR_ERR(skb);
1286 
1287 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1288 }
1289 
1290 static inline int
1291 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1292 {
1293 	struct sk_buff *skb;
1294 
1295 	if (!ar->wmi.ops->gen_prb_tmpl)
1296 		return -EOPNOTSUPP;
1297 
1298 	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1299 	if (IS_ERR(skb))
1300 		return PTR_ERR(skb);
1301 
1302 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1303 }
1304 
1305 static inline int
1306 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1307 {
1308 	struct sk_buff *skb;
1309 
1310 	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1311 		return -EOPNOTSUPP;
1312 
1313 	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1314 	if (IS_ERR(skb))
1315 		return PTR_ERR(skb);
1316 
1317 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1318 }
1319 
1320 static inline int
1321 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1322 			 const struct wmi_sta_keepalive_arg *arg)
1323 {
1324 	struct sk_buff *skb;
1325 	u32 cmd_id;
1326 
1327 	if (!ar->wmi.ops->gen_sta_keepalive)
1328 		return -EOPNOTSUPP;
1329 
1330 	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1331 	if (IS_ERR(skb))
1332 		return PTR_ERR(skb);
1333 
1334 	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1335 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1336 }
1337 
1338 static inline int
1339 ath10k_wmi_wow_enable(struct ath10k *ar)
1340 {
1341 	struct sk_buff *skb;
1342 	u32 cmd_id;
1343 
1344 	if (!ar->wmi.ops->gen_wow_enable)
1345 		return -EOPNOTSUPP;
1346 
1347 	skb = ar->wmi.ops->gen_wow_enable(ar);
1348 	if (IS_ERR(skb))
1349 		return PTR_ERR(skb);
1350 
1351 	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1352 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1353 }
1354 
1355 static inline int
1356 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1357 				enum wmi_wow_wakeup_event event,
1358 				u32 enable)
1359 {
1360 	struct sk_buff *skb;
1361 	u32 cmd_id;
1362 
1363 	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1364 		return -EOPNOTSUPP;
1365 
1366 	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1367 	if (IS_ERR(skb))
1368 		return PTR_ERR(skb);
1369 
1370 	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1371 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1372 }
1373 
1374 static inline int
1375 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1376 {
1377 	struct sk_buff *skb;
1378 	u32 cmd_id;
1379 
1380 	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1381 		return -EOPNOTSUPP;
1382 
1383 	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1384 	if (IS_ERR(skb))
1385 		return PTR_ERR(skb);
1386 
1387 	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1388 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1389 }
1390 
1391 static inline int
1392 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1393 			   const u8 *pattern, const u8 *mask,
1394 			   int pattern_len, int pattern_offset)
1395 {
1396 	struct sk_buff *skb;
1397 	u32 cmd_id;
1398 
1399 	if (!ar->wmi.ops->gen_wow_add_pattern)
1400 		return -EOPNOTSUPP;
1401 
1402 	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1403 					       pattern, mask, pattern_len,
1404 					       pattern_offset);
1405 	if (IS_ERR(skb))
1406 		return PTR_ERR(skb);
1407 
1408 	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1409 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1410 }
1411 
1412 static inline int
1413 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1414 {
1415 	struct sk_buff *skb;
1416 	u32 cmd_id;
1417 
1418 	if (!ar->wmi.ops->gen_wow_del_pattern)
1419 		return -EOPNOTSUPP;
1420 
1421 	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1422 	if (IS_ERR(skb))
1423 		return PTR_ERR(skb);
1424 
1425 	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1426 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1427 }
1428 
1429 static inline int
1430 ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
1431 			  struct wmi_pno_scan_req  *pno_scan)
1432 {
1433 	struct sk_buff *skb;
1434 	u32 cmd_id;
1435 
1436 	if (!ar->wmi.ops->gen_wow_config_pno)
1437 		return -EOPNOTSUPP;
1438 
1439 	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
1440 	if (IS_ERR(skb))
1441 		return PTR_ERR(skb);
1442 
1443 	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
1444 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1445 }
1446 
1447 static inline int
1448 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1449 				enum wmi_tdls_state state)
1450 {
1451 	struct sk_buff *skb;
1452 
1453 	if (!ar->wmi.ops->gen_update_fw_tdls_state)
1454 		return -EOPNOTSUPP;
1455 
1456 	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1457 	if (IS_ERR(skb))
1458 		return PTR_ERR(skb);
1459 
1460 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1461 }
1462 
1463 static inline int
1464 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1465 			    const struct wmi_tdls_peer_update_cmd_arg *arg,
1466 			    const struct wmi_tdls_peer_capab_arg *cap,
1467 			    const struct wmi_channel_arg *chan)
1468 {
1469 	struct sk_buff *skb;
1470 
1471 	if (!ar->wmi.ops->gen_tdls_peer_update)
1472 		return -EOPNOTSUPP;
1473 
1474 	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1475 	if (IS_ERR(skb))
1476 		return PTR_ERR(skb);
1477 
1478 	return ath10k_wmi_cmd_send(ar, skb,
1479 				   ar->wmi.cmd->tdls_peer_update_cmdid);
1480 }
1481 
1482 static inline int
1483 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1484 {
1485 	struct sk_buff *skb;
1486 
1487 	if (!ar->wmi.ops->gen_adaptive_qcs)
1488 		return -EOPNOTSUPP;
1489 
1490 	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1491 	if (IS_ERR(skb))
1492 		return PTR_ERR(skb);
1493 
1494 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1495 }
1496 
1497 static inline int
1498 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1499 {
1500 	struct sk_buff *skb;
1501 
1502 	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1503 		return -EOPNOTSUPP;
1504 
1505 	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1506 
1507 	if (IS_ERR(skb))
1508 		return PTR_ERR(skb);
1509 
1510 	return ath10k_wmi_cmd_send(ar, skb,
1511 				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1512 }
1513 
1514 static inline int
1515 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1516 			 char *buf)
1517 {
1518 	if (!ar->wmi.ops->fw_stats_fill)
1519 		return -EOPNOTSUPP;
1520 
1521 	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1522 	return 0;
1523 }
1524 
1525 static inline int
1526 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1527 				    u32 detect_level, u32 detect_margin)
1528 {
1529 	struct sk_buff *skb;
1530 
1531 	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1532 		return -EOPNOTSUPP;
1533 
1534 	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1535 							detect_level,
1536 							detect_margin);
1537 
1538 	if (IS_ERR(skb))
1539 		return PTR_ERR(skb);
1540 
1541 	return ath10k_wmi_cmd_send(ar, skb,
1542 				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1543 }
1544 
1545 static inline int
1546 ath10k_wmi_ext_resource_config(struct ath10k *ar,
1547 			       enum wmi_host_platform_type type,
1548 			       u32 fw_feature_bitmap)
1549 {
1550 	struct sk_buff *skb;
1551 
1552 	if (!ar->wmi.ops->ext_resource_config)
1553 		return -EOPNOTSUPP;
1554 
1555 	skb = ar->wmi.ops->ext_resource_config(ar, type,
1556 					       fw_feature_bitmap);
1557 
1558 	if (IS_ERR(skb))
1559 		return PTR_ERR(skb);
1560 
1561 	return ath10k_wmi_cmd_send(ar, skb,
1562 				   ar->wmi.cmd->ext_resource_cfg_cmdid);
1563 }
1564 
1565 static inline int
1566 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1567 {
1568 	if (!ar->wmi.ops->get_vdev_subtype)
1569 		return -EOPNOTSUPP;
1570 
1571 	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1572 }
1573 
1574 static inline int
1575 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1576 				      enum wmi_bss_survey_req_type type)
1577 {
1578 	struct ath10k_wmi *wmi = &ar->wmi;
1579 	struct sk_buff *skb;
1580 
1581 	if (!wmi->ops->gen_pdev_bss_chan_info_req)
1582 		return -EOPNOTSUPP;
1583 
1584 	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1585 	if (IS_ERR(skb))
1586 		return PTR_ERR(skb);
1587 
1588 	return ath10k_wmi_cmd_send(ar, skb,
1589 				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
1590 }
1591 
1592 static inline int
1593 ath10k_wmi_echo(struct ath10k *ar, u32 value)
1594 {
1595 	struct ath10k_wmi *wmi = &ar->wmi;
1596 	struct sk_buff *skb;
1597 
1598 	if (!wmi->ops->gen_echo)
1599 		return -EOPNOTSUPP;
1600 
1601 	skb = wmi->ops->gen_echo(ar, value);
1602 	if (IS_ERR(skb))
1603 		return PTR_ERR(skb);
1604 
1605 	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1606 }
1607 
1608 static inline int
1609 ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1610 {
1611 	struct sk_buff *skb;
1612 
1613 	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1614 		return -EOPNOTSUPP;
1615 
1616 	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1617 
1618 	if (IS_ERR(skb))
1619 		return PTR_ERR(skb);
1620 
1621 	return ath10k_wmi_cmd_send(ar, skb,
1622 				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1623 }
1624 
1625 static inline int
1626 ath10k_wmi_report_radar_found(struct ath10k *ar,
1627 			      const struct ath10k_radar_found_info *arg)
1628 {
1629 	struct sk_buff *skb;
1630 
1631 	if (!ar->wmi.ops->gen_radar_found)
1632 		return -EOPNOTSUPP;
1633 
1634 	skb = ar->wmi.ops->gen_radar_found(ar, arg);
1635 	if (IS_ERR(skb))
1636 		return PTR_ERR(skb);
1637 
1638 	return ath10k_wmi_cmd_send(ar, skb,
1639 				   ar->wmi.cmd->radar_found_cmdid);
1640 }
1641 
1642 static inline int
1643 ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
1644 			  const struct wmi_bb_timing_cfg_arg *arg)
1645 {
1646 	struct sk_buff *skb;
1647 
1648 	if (!ar->wmi.ops->gen_bb_timing)
1649 		return -EOPNOTSUPP;
1650 
1651 	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
1652 
1653 	if (IS_ERR(skb))
1654 		return PTR_ERR(skb);
1655 
1656 	return ath10k_wmi_cmd_send(ar, skb,
1657 				   ar->wmi.cmd->set_bb_timing_cmdid);
1658 }
1659 #endif
1660