/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

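/*
 * Dispatch table abstracting the different WMI interface revisions spoken by
 * ath10k firmware (for example the word-based 10.x encodings and the TLV
 * encoding). Each backend fills in the ops it supports; a NULL entry means
 * the command or event is not available on that interface. The "pull_*" ops
 * parse received WMI events into host argument structures, while the "gen_*"
 * ops only build the command sk_buff, which the inline wrappers below hand
 * to ath10k_wmi_cmd_send().
 */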
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
						     const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
						       u32 vdev_id,
						       enum
						       wmi_peer_stats_info_request_type
						       type,
						       u8 *addr,
						       u32 reset);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)
			(struct ath10k *ar,
			 const struct wmi_bb_timing_cfg_arg *arg);
	struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
						    const struct wmi_per_peer_per_tid_cfg_arg *arg);

};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

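/*
 * The helpers below follow a common pattern: check that the current WMI
 * backend implements the op (returning -EOPNOTSUPP otherwise), let the
 * "gen_*" op build the command buffer, propagate any ERR_PTR() it returns,
 * and finally queue the buffer with ath10k_wmi_cmd_send() using the command
 * id taken from the per-interface ar->wmi.cmd map.
 */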
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
{
	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
		return -EOPNOTSUPP;

	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
}

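/*
 * Two management-frame transmit flavours exist: gen_mgmt_tx_send passes the
 * frame by DMA address and leaves completion reporting to the mgmt tx
 * completion event parsers above, whereas gen_mgmt_tx carries the frame in
 * the command buffer itself and, lacking a completion event, immediately
 * reports the frame as acked (see the FIXME below).
 */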
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

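/*
 * Unlike the other senders, beacon submission below goes through the no-wait
 * variant of the WMI send path, so it has to free the command buffer itself
 * when queueing fails.
 */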
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
				   u32 vdev_id,
				   enum wmi_peer_stats_info_request_type type,
				   u8 *addr,
				   u32 reset)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_peer_stats_info)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
						       vdev_id,
						       type,
						       addr,
						       reset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
			  struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
			  const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->set_bb_timing_cmdid);
}

static inline int
ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
				    const struct wmi_per_peer_per_tid_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->per_peer_per_tid_config_cmdid);
}
#endif