1 /* SPDX-License-Identifier: ISC */
2 /*
3  * Copyright (c) 2005-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
6  */
7 
8 #ifndef _WMI_OPS_H_
9 #define _WMI_OPS_H_
10 
11 struct ath10k;
12 struct sk_buff;
13 
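/*
 * struct wmi_ops - per-interface WMI hooks
 *
 * Each WMI interface variant (e.g. the legacy fixed-layout WMI in wmi.c and
 * the TLV-based WMI in wmi-tlv.c) fills in its own instance of this table.
 * The pull_* ops parse received event buffers into host-order argument
 * structures, the gen_* ops build command skbs to be sent to the firmware,
 * and ops not supported by a given interface may be left NULL; the inline
 * wrappers below return -EOPNOTSUPP in that case.
 */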
14 struct wmi_ops {
15 	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
16 	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
17 	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
18 
19 	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
20 			 struct wmi_scan_ev_arg *arg);
21 	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
22 			    struct wmi_mgmt_rx_ev_arg *arg);
23 	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
24 				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
25 	int (*pull_mgmt_tx_bundle_compl)(
26 				struct ath10k *ar, struct sk_buff *skb,
27 				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
28 	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
29 			    struct wmi_ch_info_ev_arg *arg);
30 	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
31 			       struct wmi_vdev_start_ev_arg *arg);
32 	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
33 			      struct wmi_peer_kick_ev_arg *arg);
34 	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
35 			 struct wmi_swba_ev_arg *arg);
36 	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
37 			       struct wmi_phyerr_hdr_arg *arg);
38 	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
39 			   int left_len, struct wmi_phyerr_ev_arg *arg);
40 	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
41 			    struct wmi_svc_rdy_ev_arg *arg);
42 	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 			struct wmi_rdy_ev_arg *arg);
44 	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
45 			     struct ath10k_fw_stats *stats);
46 	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
47 			    struct wmi_roam_ev_arg *arg);
48 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
49 			      struct wmi_wow_ev_arg *arg);
50 	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
51 			    struct wmi_echo_ev_arg *arg);
52 	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
53 				  struct wmi_dfs_status_ev_arg *arg);
54 	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
55 			      struct wmi_svc_avail_ev_arg *arg);
56 
57 	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
58 
59 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
60 	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
61 	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
62 						     const u8 macaddr[ETH_ALEN]);
63 	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
64 					   u16 rd5g, u16 ctl2g, u16 ctl5g,
65 					   enum wmi_dfs_region dfs_reg);
66 	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
67 					      u32 value);
68 	struct sk_buff *(*gen_init)(struct ath10k *ar);
69 	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
70 					  const struct wmi_start_scan_arg *arg);
71 	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
72 					 const struct wmi_stop_scan_arg *arg);
73 	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
74 					   enum wmi_vdev_type type,
75 					   enum wmi_vdev_subtype subtype,
76 					   const u8 macaddr[ETH_ALEN]);
77 	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
78 	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
79 					  const struct wmi_vdev_start_request_arg *arg,
80 					  bool restart);
81 	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
82 	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
83 				       const u8 *bssid);
84 	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
85 	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
86 					      u32 param_id, u32 param_value);
87 	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
88 						const struct wmi_vdev_install_key_arg *arg);
89 	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
90 						  const struct wmi_vdev_spectral_conf_arg *arg);
91 	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
92 						    u32 trigger, u32 enable);
93 	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
94 					     const struct wmi_wmm_params_all_arg *arg);
95 	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
96 					   const u8 peer_addr[ETH_ALEN],
97 					   enum wmi_peer_type peer_type);
98 	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
99 					   const u8 peer_addr[ETH_ALEN]);
100 	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
101 					  const u8 peer_addr[ETH_ALEN],
102 					  u32 tid_bitmap);
103 	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
104 					      const u8 *peer_addr,
105 					      enum wmi_peer_param param_id,
106 					      u32 param_value);
107 	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
108 					  const struct wmi_peer_assoc_complete_arg *arg);
109 	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
110 					  enum wmi_sta_ps_mode psmode);
111 	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
112 					  enum wmi_sta_powersave_param param_id,
113 					  u32 value);
114 	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
115 					 const u8 *mac,
116 					 enum wmi_ap_ps_peer_param param_id,
117 					 u32 value);
118 	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
119 					      const struct wmi_scan_chan_list_arg *arg);
120 	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
121 						 u32 prob_req_oui);
122 	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
123 					  const void *bcn, size_t bcn_len,
124 					  u32 bcn_paddr, bool dtim_zero,
125 					  bool deliver_cab);
126 	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
127 					    const struct wmi_wmm_params_all_arg *arg);
128 	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
129 	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
130 						       u32 vdev_id,
131 						       enum
132 						       wmi_peer_stats_info_request_type
133 						       type,
134 						       u8 *addr,
135 						       u32 reset);
136 	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
137 					     enum wmi_force_fw_hang_type type,
138 					     u32 delay_ms);
139 	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
140 	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
141 					    struct sk_buff *skb,
142 					    dma_addr_t paddr);
143 	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
144 	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
145 					  u32 log_level);
146 	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
147 	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
148 	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
149 						   u32 period, u32 duration,
150 						   u32 next_offset,
151 						   u32 enabled);
152 	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
153 	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
154 						const u8 *mac);
155 	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
156 					  const u8 *mac, u32 tid, u32 buf_size);
157 	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
158 					      const u8 *mac, u32 tid,
159 					      u32 status);
160 	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
161 					  const u8 *mac, u32 tid, u32 initiator,
162 					  u32 reason);
163 	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
164 					u32 tim_ie_offset, struct sk_buff *bcn,
165 					u32 prb_caps, u32 prb_erp,
166 					void *prb_ies, size_t prb_ies_len);
167 	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
168 					struct sk_buff *bcn);
169 	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
170 					     const u8 *p2p_ie);
171 	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
172 					      const u8 peer_addr[ETH_ALEN],
173 					      const struct wmi_sta_uapsd_auto_trig_arg *args,
174 					      u32 num_ac);
175 	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
176 					     const struct wmi_sta_keepalive_arg *arg);
177 	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
178 	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
179 						    enum wmi_wow_wakeup_event event,
180 						    u32 enable);
181 	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
182 	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
183 					       u32 pattern_id,
184 					       const u8 *pattern,
185 					       const u8 *mask,
186 					       int pattern_len,
187 					       int pattern_offset);
188 	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
189 					       u32 pattern_id);
190 	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
191 						    u32 vdev_id,
192 						    enum wmi_tdls_state state);
193 	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
194 						const struct wmi_tdls_peer_update_cmd_arg *arg,
195 						const struct wmi_tdls_peer_capab_arg *cap,
196 						const struct wmi_channel_arg *chan);
197 	struct sk_buff *(*gen_radar_found)
198 			(struct ath10k *ar,
199 			 const struct ath10k_radar_found_info *arg);
200 	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
201 	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
202 						   u32 param);
203 	void (*fw_stats_fill)(struct ath10k *ar,
204 			      struct ath10k_fw_stats *fw_stats,
205 			      char *buf);
206 	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
207 							u8 enable,
208 							u32 detect_level,
209 							u32 detect_margin);
210 	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
211 					       enum wmi_host_platform_type type,
212 					       u32 fw_feature_bitmap);
213 	int (*get_vdev_subtype)(struct ath10k *ar,
214 				enum wmi_vdev_subtype subtype);
215 	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
216 					      u32 vdev_id,
217 					      struct wmi_pno_scan_req *pno_scan);
218 	struct sk_buff *(*gen_pdev_bss_chan_info_req)
219 					(struct ath10k *ar,
220 					 enum wmi_bss_survey_req_type type);
221 	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
222 	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
223 							u32 param);
224 	struct sk_buff *(*gen_bb_timing)
225 			(struct ath10k *ar,
226 			 const struct wmi_bb_timing_cfg_arg *arg);
227 	struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
228 						    const struct wmi_per_peer_per_tid_cfg_arg *arg);
229 	struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
230 					   u32 input, u32 pull_type, u32 intr_mode);
231 
232 	struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
233 };
234 
235 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
236 
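/*
 * The inline wrappers below share one pattern: return -EOPNOTSUPP if the
 * current interface does not implement the op, otherwise build the command
 * skb via the gen_* op and submit it with the interface-specific command id.
 * A minimal sketch of that pattern, assuming a hypothetical gen_foo op and
 * foo_cmdid:
 *
 *	if (!ar->wmi.ops->gen_foo)
 *		return -EOPNOTSUPP;
 *
 *	skb = ar->wmi.ops->gen_foo(ar, arg);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 */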
237 static inline int
238 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
239 {
240 	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
241 		return -EOPNOTSUPP;
242 
243 	ar->wmi.ops->rx(ar, skb);
244 	return 0;
245 }
246 
247 static inline int
248 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
249 		   size_t len)
250 {
251 	if (!ar->wmi.ops->map_svc)
252 		return -EOPNOTSUPP;
253 
254 	ar->wmi.ops->map_svc(in, out, len);
255 	return 0;
256 }
257 
258 static inline int
259 ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
260 		       size_t len)
261 {
262 	if (!ar->wmi.ops->map_svc_ext)
263 		return -EOPNOTSUPP;
264 
265 	ar->wmi.ops->map_svc_ext(in, out, len);
266 	return 0;
267 }
268 
269 static inline int
270 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
271 		     struct wmi_scan_ev_arg *arg)
272 {
273 	if (!ar->wmi.ops->pull_scan)
274 		return -EOPNOTSUPP;
275 
276 	return ar->wmi.ops->pull_scan(ar, skb, arg);
277 }
278 
279 static inline int
280 ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
281 			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
282 {
283 	if (!ar->wmi.ops->pull_mgmt_tx_compl)
284 		return -EOPNOTSUPP;
285 
286 	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
287 }
288 
289 static inline int
290 ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
291 				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
292 {
293 	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
294 		return -EOPNOTSUPP;
295 
296 	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
297 }
298 
299 static inline int
300 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
301 			struct wmi_mgmt_rx_ev_arg *arg)
302 {
303 	if (!ar->wmi.ops->pull_mgmt_rx)
304 		return -EOPNOTSUPP;
305 
306 	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
307 }
308 
309 static inline int
310 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
311 			struct wmi_ch_info_ev_arg *arg)
312 {
313 	if (!ar->wmi.ops->pull_ch_info)
314 		return -EOPNOTSUPP;
315 
316 	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
317 }
318 
319 static inline int
320 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
321 			   struct wmi_vdev_start_ev_arg *arg)
322 {
323 	if (!ar->wmi.ops->pull_vdev_start)
324 		return -EOPNOTSUPP;
325 
326 	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
327 }
328 
329 static inline int
330 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
331 			  struct wmi_peer_kick_ev_arg *arg)
332 {
333 	if (!ar->wmi.ops->pull_peer_kick)
334 		return -EOPNOTSUPP;
335 
336 	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
337 }
338 
339 static inline int
340 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
341 		     struct wmi_swba_ev_arg *arg)
342 {
343 	if (!ar->wmi.ops->pull_swba)
344 		return -EOPNOTSUPP;
345 
346 	return ar->wmi.ops->pull_swba(ar, skb, arg);
347 }
348 
349 static inline int
350 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
351 			   struct wmi_phyerr_hdr_arg *arg)
352 {
353 	if (!ar->wmi.ops->pull_phyerr_hdr)
354 		return -EOPNOTSUPP;
355 
356 	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
357 }
358 
359 static inline int
360 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
361 		       int left_len, struct wmi_phyerr_ev_arg *arg)
362 {
363 	if (!ar->wmi.ops->pull_phyerr)
364 		return -EOPNOTSUPP;
365 
366 	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
367 }
368 
369 static inline int
370 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
371 			struct wmi_svc_rdy_ev_arg *arg)
372 {
373 	if (!ar->wmi.ops->pull_svc_rdy)
374 		return -EOPNOTSUPP;
375 
376 	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
377 }
378 
379 static inline int
380 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
381 		    struct wmi_rdy_ev_arg *arg)
382 {
383 	if (!ar->wmi.ops->pull_rdy)
384 		return -EOPNOTSUPP;
385 
386 	return ar->wmi.ops->pull_rdy(ar, skb, arg);
387 }
388 
389 static inline int
390 ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
391 			  struct wmi_svc_avail_ev_arg *arg)
392 {
393 	if (!ar->wmi.ops->pull_svc_avail)
394 		return -EOPNOTSUPP;

395 	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
396 }
397 
398 static inline int
399 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
400 			 struct ath10k_fw_stats *stats)
401 {
402 	if (!ar->wmi.ops->pull_fw_stats)
403 		return -EOPNOTSUPP;
404 
405 	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
406 }
407 
408 static inline int
409 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
410 			struct wmi_roam_ev_arg *arg)
411 {
412 	if (!ar->wmi.ops->pull_roam_ev)
413 		return -EOPNOTSUPP;
414 
415 	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
416 }
417 
418 static inline int
419 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
420 			  struct wmi_wow_ev_arg *arg)
421 {
422 	if (!ar->wmi.ops->pull_wow_event)
423 		return -EOPNOTSUPP;
424 
425 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
426 }
427 
428 static inline int
429 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
430 			struct wmi_echo_ev_arg *arg)
431 {
432 	if (!ar->wmi.ops->pull_echo_ev)
433 		return -EOPNOTSUPP;
434 
435 	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
436 }
437 
438 static inline int
439 ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
440 			   struct wmi_dfs_status_ev_arg *arg)
441 {
442 	if (!ar->wmi.ops->pull_dfs_status_ev)
443 		return -EOPNOTSUPP;
444 
445 	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
446 }
447 
448 static inline enum wmi_txbf_conf
449 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
450 {
451 	if (!ar->wmi.ops->get_txbf_conf_scheme)
452 		return WMI_TXBF_CONF_UNSUPPORTED;
453 
454 	return ar->wmi.ops->get_txbf_conf_scheme(ar);
455 }
456 
457 static inline int
458 ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
459 {
460 	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
461 		return -EOPNOTSUPP;
462 
463 	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
464 }
465 
466 static inline int
467 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
468 			dma_addr_t paddr)
469 {
470 	struct sk_buff *skb;
471 	int ret;
472 
473 	if (!ar->wmi.ops->gen_mgmt_tx_send)
474 		return -EOPNOTSUPP;
475 
476 	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
477 	if (IS_ERR(skb))
478 		return PTR_ERR(skb);
479 
480 	ret = ath10k_wmi_cmd_send(ar, skb,
481 				  ar->wmi.cmd->mgmt_tx_send_cmdid);
482 	if (ret)
483 		return ret;
484 
485 	return 0;
486 }
487 
488 static inline int
489 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
490 {
491 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
492 	struct sk_buff *skb;
493 	int ret;
494 
495 	if (!ar->wmi.ops->gen_mgmt_tx)
496 		return -EOPNOTSUPP;
497 
498 	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
499 	if (IS_ERR(skb))
500 		return PTR_ERR(skb);
501 
502 	ret = ath10k_wmi_cmd_send(ar, skb,
503 				  ar->wmi.cmd->mgmt_tx_cmdid);
504 	if (ret)
505 		return ret;
506 
507 	/* FIXME There's no ACK event for Management Tx. This probably
508 	 * shouldn't be called here either.
509 	 */
510 	info->flags |= IEEE80211_TX_STAT_ACK;
511 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
512 
513 	return 0;
514 }
515 
516 static inline int
517 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
518 			      u16 ctl2g, u16 ctl5g,
519 			      enum wmi_dfs_region dfs_reg)
520 {
521 	struct sk_buff *skb;
522 
523 	if (!ar->wmi.ops->gen_pdev_set_rd)
524 		return -EOPNOTSUPP;
525 
526 	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
527 					   dfs_reg);
528 	if (IS_ERR(skb))
529 		return PTR_ERR(skb);
530 
531 	return ath10k_wmi_cmd_send(ar, skb,
532 				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
533 }
534 
535 static inline int
536 ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
537 {
538 	struct sk_buff *skb;
539 
540 	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
541 		return -EOPNOTSUPP;
542 
543 	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
544 	if (IS_ERR(skb))
545 		return PTR_ERR(skb);
546 
547 	return ath10k_wmi_cmd_send(ar, skb,
548 				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
549 }
550 
551 static inline int
552 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
553 {
554 	struct sk_buff *skb;
555 
556 	if (!ar->wmi.ops->gen_pdev_suspend)
557 		return -EOPNOTSUPP;
558 
559 	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
560 	if (IS_ERR(skb))
561 		return PTR_ERR(skb);
562 
563 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
564 }
565 
566 static inline int
567 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
568 {
569 	struct sk_buff *skb;
570 
571 	if (!ar->wmi.ops->gen_pdev_resume)
572 		return -EOPNOTSUPP;
573 
574 	skb = ar->wmi.ops->gen_pdev_resume(ar);
575 	if (IS_ERR(skb))
576 		return PTR_ERR(skb);
577 
578 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
579 }
580 
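/*
 * Illustrative use of ath10k_wmi_pdev_set_param() below, assuming the running
 * firmware maps the pmf_qos pdev param (see wmi_pdev_param_map):
 *
 *	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
 */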
581 static inline int
582 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
583 {
584 	struct sk_buff *skb;
585 
586 	if (!ar->wmi.ops->gen_pdev_set_param)
587 		return -EOPNOTSUPP;
588 
589 	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
590 	if (IS_ERR(skb))
591 		return PTR_ERR(skb);
592 
593 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
594 }
595 
596 static inline int
597 ath10k_wmi_cmd_init(struct ath10k *ar)
598 {
599 	struct sk_buff *skb;
600 
601 	if (!ar->wmi.ops->gen_init)
602 		return -EOPNOTSUPP;
603 
604 	skb = ar->wmi.ops->gen_init(ar);
605 	if (IS_ERR(skb))
606 		return PTR_ERR(skb);
607 
608 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
609 }
610 
611 static inline int
612 ath10k_wmi_start_scan(struct ath10k *ar,
613 		      const struct wmi_start_scan_arg *arg)
614 {
615 	struct sk_buff *skb;
616 
617 	if (!ar->wmi.ops->gen_start_scan)
618 		return -EOPNOTSUPP;
619 
620 	skb = ar->wmi.ops->gen_start_scan(ar, arg);
621 	if (IS_ERR(skb))
622 		return PTR_ERR(skb);
623 
624 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
625 }
626 
627 static inline int
628 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
629 {
630 	struct sk_buff *skb;
631 
632 	if (!ar->wmi.ops->gen_stop_scan)
633 		return -EOPNOTSUPP;
634 
635 	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
636 	if (IS_ERR(skb))
637 		return PTR_ERR(skb);
638 
639 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
640 }
641 
642 static inline int
643 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
644 		       enum wmi_vdev_type type,
645 		       enum wmi_vdev_subtype subtype,
646 		       const u8 macaddr[ETH_ALEN])
647 {
648 	struct sk_buff *skb;
649 
650 	if (!ar->wmi.ops->gen_vdev_create)
651 		return -EOPNOTSUPP;
652 
653 	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
654 	if (IS_ERR(skb))
655 		return PTR_ERR(skb);
656 
657 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
658 }
659 
660 static inline int
661 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
662 {
663 	struct sk_buff *skb;
664 
665 	if (!ar->wmi.ops->gen_vdev_delete)
666 		return -EOPNOTSUPP;
667 
668 	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
669 	if (IS_ERR(skb))
670 		return PTR_ERR(skb);
671 
672 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
673 }
674 
675 static inline int
676 ath10k_wmi_vdev_start(struct ath10k *ar,
677 		      const struct wmi_vdev_start_request_arg *arg)
678 {
679 	struct sk_buff *skb;
680 
681 	if (!ar->wmi.ops->gen_vdev_start)
682 		return -EOPNOTSUPP;
683 
684 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
685 	if (IS_ERR(skb))
686 		return PTR_ERR(skb);
687 
688 	return ath10k_wmi_cmd_send(ar, skb,
689 				   ar->wmi.cmd->vdev_start_request_cmdid);
690 }
691 
692 static inline int
693 ath10k_wmi_vdev_restart(struct ath10k *ar,
694 			const struct wmi_vdev_start_request_arg *arg)
695 {
696 	struct sk_buff *skb;
697 
698 	if (!ar->wmi.ops->gen_vdev_start)
699 		return -EOPNOTSUPP;
700 
701 	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
702 	if (IS_ERR(skb))
703 		return PTR_ERR(skb);
704 
705 	return ath10k_wmi_cmd_send(ar, skb,
706 				   ar->wmi.cmd->vdev_restart_request_cmdid);
707 }
708 
709 static inline int
710 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
711 {
712 	struct sk_buff *skb;
713 
714 	if (!ar->wmi.ops->gen_vdev_stop)
715 		return -EOPNOTSUPP;
716 
717 	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
718 	if (IS_ERR(skb))
719 		return PTR_ERR(skb);
720 
721 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
722 }
723 
724 static inline int
725 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
726 {
727 	struct sk_buff *skb;
728 
729 	if (!ar->wmi.ops->gen_vdev_up)
730 		return -EOPNOTSUPP;
731 
732 	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
733 	if (IS_ERR(skb))
734 		return PTR_ERR(skb);
735 
736 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
737 }
738 
739 static inline int
740 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
741 {
742 	struct sk_buff *skb;
743 
744 	if (!ar->wmi.ops->gen_vdev_down)
745 		return -EOPNOTSUPP;
746 
747 	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
748 	if (IS_ERR(skb))
749 		return PTR_ERR(skb);
750 
751 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
752 }
753 
754 static inline int
755 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
756 			  u32 param_value)
757 {
758 	struct sk_buff *skb;
759 
760 	if (!ar->wmi.ops->gen_vdev_set_param)
761 		return -EOPNOTSUPP;
762 
763 	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
764 					      param_value);
765 	if (IS_ERR(skb))
766 		return PTR_ERR(skb);
767 
768 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
769 }
770 
771 static inline int
772 ath10k_wmi_vdev_install_key(struct ath10k *ar,
773 			    const struct wmi_vdev_install_key_arg *arg)
774 {
775 	struct sk_buff *skb;
776 
777 	if (!ar->wmi.ops->gen_vdev_install_key)
778 		return -EOPNOTSUPP;
779 
780 	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
781 	if (IS_ERR(skb))
782 		return PTR_ERR(skb);
783 
784 	return ath10k_wmi_cmd_send(ar, skb,
785 				   ar->wmi.cmd->vdev_install_key_cmdid);
786 }
787 
788 static inline int
789 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
790 			      const struct wmi_vdev_spectral_conf_arg *arg)
791 {
792 	struct sk_buff *skb;
793 	u32 cmd_id;
794 
795 	if (!ar->wmi.ops->gen_vdev_spectral_conf)
796 		return -EOPNOTSUPP;
797 
798 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
799 	if (IS_ERR(skb))
800 		return PTR_ERR(skb);
801 
802 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
803 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
804 }
805 
806 static inline int
807 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
808 				u32 enable)
809 {
810 	struct sk_buff *skb;
811 	u32 cmd_id;
812 
813 	if (!ar->wmi.ops->gen_vdev_spectral_enable)
814 		return -EOPNOTSUPP;
815 
816 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
817 						    enable);
818 	if (IS_ERR(skb))
819 		return PTR_ERR(skb);
820 
821 	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
822 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
823 }
824 
825 static inline int
826 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
827 			  const u8 peer_addr[ETH_ALEN],
828 			  const struct wmi_sta_uapsd_auto_trig_arg *args,
829 			  u32 num_ac)
830 {
831 	struct sk_buff *skb;
832 	u32 cmd_id;
833 
834 	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
835 		return -EOPNOTSUPP;
836 
837 	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
838 					      num_ac);
839 	if (IS_ERR(skb))
840 		return PTR_ERR(skb);
841 
842 	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
843 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
844 }
845 
846 static inline int
847 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
848 			 const struct wmi_wmm_params_all_arg *arg)
849 {
850 	struct sk_buff *skb;
851 	u32 cmd_id;
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

852 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
853 	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
854 	if (IS_ERR(skb))
855 		return PTR_ERR(skb);
856 
857 	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
858 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
859 }
860 
861 static inline int
862 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
863 		       const u8 peer_addr[ETH_ALEN],
864 		       enum wmi_peer_type peer_type)
865 {
866 	struct sk_buff *skb;
867 
868 	if (!ar->wmi.ops->gen_peer_create)
869 		return -EOPNOTSUPP;
870 
871 	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
872 	if (IS_ERR(skb))
873 		return PTR_ERR(skb);
874 
875 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
876 }
877 
878 static inline int
879 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
880 		       const u8 peer_addr[ETH_ALEN])
881 {
882 	struct sk_buff *skb;
883 
884 	if (!ar->wmi.ops->gen_peer_delete)
885 		return -EOPNOTSUPP;
886 
887 	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
888 	if (IS_ERR(skb))
889 		return PTR_ERR(skb);
890 
891 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
892 }
893 
894 static inline int
895 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
896 		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
897 {
898 	struct sk_buff *skb;
899 
900 	if (!ar->wmi.ops->gen_peer_flush)
901 		return -EOPNOTSUPP;
902 
903 	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
904 	if (IS_ERR(skb))
905 		return PTR_ERR(skb);
906 
907 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
908 }
909 
910 static inline int
911 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
912 			  enum wmi_peer_param param_id, u32 param_value)
913 {
914 	struct sk_buff *skb;
915 
916 	if (!ar->wmi.ops->gen_peer_set_param)
917 		return -EOPNOTSUPP;
918 
919 	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
920 					      param_value);
921 	if (IS_ERR(skb))
922 		return PTR_ERR(skb);
923 
924 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
925 }
926 
927 static inline int
928 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
929 		      enum wmi_sta_ps_mode psmode)
930 {
931 	struct sk_buff *skb;
932 
933 	if (!ar->wmi.ops->gen_set_psmode)
934 		return -EOPNOTSUPP;
935 
936 	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
937 	if (IS_ERR(skb))
938 		return PTR_ERR(skb);
939 
940 	return ath10k_wmi_cmd_send(ar, skb,
941 				   ar->wmi.cmd->sta_powersave_mode_cmdid);
942 }
943 
944 static inline int
945 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
946 			    enum wmi_sta_powersave_param param_id, u32 value)
947 {
948 	struct sk_buff *skb;
949 
950 	if (!ar->wmi.ops->gen_set_sta_ps)
951 		return -EOPNOTSUPP;
952 
953 	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
954 	if (IS_ERR(skb))
955 		return PTR_ERR(skb);
956 
957 	return ath10k_wmi_cmd_send(ar, skb,
958 				   ar->wmi.cmd->sta_powersave_param_cmdid);
959 }
960 
961 static inline int
962 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
963 			   enum wmi_ap_ps_peer_param param_id, u32 value)
964 {
965 	struct sk_buff *skb;
966 
967 	if (!ar->wmi.ops->gen_set_ap_ps)
968 		return -EOPNOTSUPP;
969 
970 	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
971 	if (IS_ERR(skb))
972 		return PTR_ERR(skb);
973 
974 	return ath10k_wmi_cmd_send(ar, skb,
975 				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
976 }
977 
978 static inline int
979 ath10k_wmi_scan_chan_list(struct ath10k *ar,
980 			  const struct wmi_scan_chan_list_arg *arg)
981 {
982 	struct sk_buff *skb;
983 
984 	if (!ar->wmi.ops->gen_scan_chan_list)
985 		return -EOPNOTSUPP;
986 
987 	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
988 	if (IS_ERR(skb))
989 		return PTR_ERR(skb);
990 
991 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
992 }
993 
994 static inline int
995 ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
996 {
997 	struct sk_buff *skb;
998 	u32 prob_req_oui;
999 
1000 	prob_req_oui = (((u32)mac_addr[0]) << 16) |
1001 		       (((u32)mac_addr[1]) << 8) | mac_addr[2];
1002 
1003 	if (!ar->wmi.ops->gen_scan_prob_req_oui)
1004 		return -EOPNOTSUPP;
1005 
1006 	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
1007 	if (IS_ERR(skb))
1008 		return PTR_ERR(skb);
1009 
1010 	return ath10k_wmi_cmd_send(ar, skb,
1011 			ar->wmi.cmd->scan_prob_req_oui_cmdid);
1012 }
1013 
1014 static inline int
1015 ath10k_wmi_peer_assoc(struct ath10k *ar,
1016 		      const struct wmi_peer_assoc_complete_arg *arg)
1017 {
1018 	struct sk_buff *skb;
1019 
1020 	if (!ar->wmi.ops->gen_peer_assoc)
1021 		return -EOPNOTSUPP;
1022 
1023 	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
1024 	if (IS_ERR(skb))
1025 		return PTR_ERR(skb);
1026 
1027 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
1028 }
1029 
1030 static inline int
1031 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
1032 				  const void *bcn, size_t bcn_len,
1033 				  u32 bcn_paddr, bool dtim_zero,
1034 				  bool deliver_cab)
1035 {
1036 	struct sk_buff *skb;
1037 	int ret;
1038 
1039 	if (!ar->wmi.ops->gen_beacon_dma)
1040 		return -EOPNOTSUPP;
1041 
1042 	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
1043 					  dtim_zero, deliver_cab);
1044 	if (IS_ERR(skb))
1045 		return PTR_ERR(skb);
1046 
1047 	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
1048 					 ar->wmi.cmd->pdev_send_bcn_cmdid);
1049 	if (ret) {
1050 		dev_kfree_skb(skb);
1051 		return ret;
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 static inline int
1058 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
1059 			       const struct wmi_wmm_params_all_arg *arg)
1060 {
1061 	struct sk_buff *skb;
1062 
1063 	if (!ar->wmi.ops->gen_pdev_set_wmm)
1064 		return -EOPNOTSUPP;
1065 
1066 	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
1067 	if (IS_ERR(skb))
1068 		return PTR_ERR(skb);
1069 
1070 	return ath10k_wmi_cmd_send(ar, skb,
1071 				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
1072 }
1073 
1074 static inline int
1075 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
1076 {
1077 	struct sk_buff *skb;
1078 
1079 	if (!ar->wmi.ops->gen_request_stats)
1080 		return -EOPNOTSUPP;
1081 
1082 	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
1083 	if (IS_ERR(skb))
1084 		return PTR_ERR(skb);
1085 
1086 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
1087 }
1088 
1089 static inline int
1090 ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
1091 				   u32 vdev_id,
1092 				   enum wmi_peer_stats_info_request_type type,
1093 				   u8 *addr,
1094 				   u32 reset)
1095 {
1096 	struct sk_buff *skb;
1097 
1098 	if (!ar->wmi.ops->gen_request_peer_stats_info)
1099 		return -EOPNOTSUPP;
1100 
1101 	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
1102 						       vdev_id,
1103 						       type,
1104 						       addr,
1105 						       reset);
1106 	if (IS_ERR(skb))
1107 		return PTR_ERR(skb);
1108 
1109 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
1110 }
1111 
1112 static inline int
1113 ath10k_wmi_force_fw_hang(struct ath10k *ar,
1114 			 enum wmi_force_fw_hang_type type, u32 delay_ms)
1115 {
1116 	struct sk_buff *skb;
1117 
1118 	if (!ar->wmi.ops->gen_force_fw_hang)
1119 		return -EOPNOTSUPP;
1120 
1121 	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
1122 	if (IS_ERR(skb))
1123 		return PTR_ERR(skb);
1124 
1125 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
1126 }
1127 
1128 static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
1129 					 u32 input, u32 pull_type, u32 intr_mode)
1130 {
1131 	struct sk_buff *skb;
1132 
1133 	if (!ar->wmi.ops->gen_gpio_config)
1134 		return -EOPNOTSUPP;
1135 
1136 	skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
1137 	if (IS_ERR(skb))
1138 		return PTR_ERR(skb);
1139 
1140 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
1141 }
1142 
1143 static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
1144 {
1145 	struct sk_buff *skb;
1146 
1147 	if (!ar->wmi.ops->gen_gpio_output)
1148 		return -EOPNOTSUPP;
1149 
1150 	skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
1151 	if (IS_ERR(skb))
1152 		return PTR_ERR(skb);
1153 
1154 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
1155 }
1156 
1157 static inline int
1158 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
1159 {
1160 	struct sk_buff *skb;
1161 
1162 	if (!ar->wmi.ops->gen_dbglog_cfg)
1163 		return -EOPNOTSUPP;
1164 
1165 	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
1166 	if (IS_ERR(skb))
1167 		return PTR_ERR(skb);
1168 
1169 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
1170 }
1171 
1172 static inline int
1173 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1174 {
1175 	struct sk_buff *skb;
1176 
1177 	if (!ar->wmi.ops->gen_pktlog_enable)
1178 		return -EOPNOTSUPP;
1179 
1180 	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1181 	if (IS_ERR(skb))
1182 		return PTR_ERR(skb);
1183 
1184 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1185 }
1186 
1187 static inline int
1188 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1189 {
1190 	struct sk_buff *skb;
1191 
1192 	if (!ar->wmi.ops->gen_pktlog_disable)
1193 		return -EOPNOTSUPP;
1194 
1195 	skb = ar->wmi.ops->gen_pktlog_disable(ar);
1196 	if (IS_ERR(skb))
1197 		return PTR_ERR(skb);
1198 
1199 	return ath10k_wmi_cmd_send(ar, skb,
1200 				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1201 }
1202 
1203 static inline int
1204 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1205 			       u32 next_offset, u32 enabled)
1206 {
1207 	struct sk_buff *skb;
1208 
1209 	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1210 		return -EOPNOTSUPP;
1211 
1212 	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1213 						   next_offset, enabled);
1214 	if (IS_ERR(skb))
1215 		return PTR_ERR(skb);
1216 
1217 	return ath10k_wmi_cmd_send(ar, skb,
1218 				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1219 }
1220 
1221 static inline int
1222 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1223 {
1224 	struct sk_buff *skb;
1225 
1226 	if (!ar->wmi.ops->gen_pdev_get_temperature)
1227 		return -EOPNOTSUPP;
1228 
1229 	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1230 	if (IS_ERR(skb))
1231 		return PTR_ERR(skb);
1232 
1233 	return ath10k_wmi_cmd_send(ar, skb,
1234 				   ar->wmi.cmd->pdev_get_temperature_cmdid);
1235 }
1236 
1237 static inline int
1238 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1239 {
1240 	struct sk_buff *skb;
1241 
1242 	if (!ar->wmi.ops->gen_addba_clear_resp)
1243 		return -EOPNOTSUPP;
1244 
1245 	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1246 	if (IS_ERR(skb))
1247 		return PTR_ERR(skb);
1248 
1249 	return ath10k_wmi_cmd_send(ar, skb,
1250 				   ar->wmi.cmd->addba_clear_resp_cmdid);
1251 }
1252 
1253 static inline int
1254 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1255 		      u32 tid, u32 buf_size)
1256 {
1257 	struct sk_buff *skb;
1258 
1259 	if (!ar->wmi.ops->gen_addba_send)
1260 		return -EOPNOTSUPP;
1261 
1262 	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1263 	if (IS_ERR(skb))
1264 		return PTR_ERR(skb);
1265 
1266 	return ath10k_wmi_cmd_send(ar, skb,
1267 				   ar->wmi.cmd->addba_send_cmdid);
1268 }
1269 
1270 static inline int
1271 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1272 			  u32 tid, u32 status)
1273 {
1274 	struct sk_buff *skb;
1275 
1276 	if (!ar->wmi.ops->gen_addba_set_resp)
1277 		return -EOPNOTSUPP;
1278 
1279 	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1280 	if (IS_ERR(skb))
1281 		return PTR_ERR(skb);
1282 
1283 	return ath10k_wmi_cmd_send(ar, skb,
1284 				   ar->wmi.cmd->addba_set_resp_cmdid);
1285 }
1286 
1287 static inline int
1288 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1289 		      u32 tid, u32 initiator, u32 reason)
1290 {
1291 	struct sk_buff *skb;
1292 
1293 	if (!ar->wmi.ops->gen_delba_send)
1294 		return -EOPNOTSUPP;
1295 
1296 	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1297 					  reason);
1298 	if (IS_ERR(skb))
1299 		return PTR_ERR(skb);
1300 
1301 	return ath10k_wmi_cmd_send(ar, skb,
1302 				   ar->wmi.cmd->delba_send_cmdid);
1303 }
1304 
1305 static inline int
1306 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1307 		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1308 		    void *prb_ies, size_t prb_ies_len)
1309 {
1310 	struct sk_buff *skb;
1311 
1312 	if (!ar->wmi.ops->gen_bcn_tmpl)
1313 		return -EOPNOTSUPP;
1314 
1315 	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1316 					prb_caps, prb_erp, prb_ies,
1317 					prb_ies_len);
1318 	if (IS_ERR(skb))
1319 		return PTR_ERR(skb);
1320 
1321 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1322 }
1323 
1324 static inline int
1325 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1326 {
1327 	struct sk_buff *skb;
1328 
1329 	if (!ar->wmi.ops->gen_prb_tmpl)
1330 		return -EOPNOTSUPP;
1331 
1332 	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1333 	if (IS_ERR(skb))
1334 		return PTR_ERR(skb);
1335 
1336 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1337 }
1338 
1339 static inline int
1340 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1341 {
1342 	struct sk_buff *skb;
1343 
1344 	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1345 		return -EOPNOTSUPP;
1346 
1347 	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1348 	if (IS_ERR(skb))
1349 		return PTR_ERR(skb);
1350 
1351 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1352 }
1353 
1354 static inline int
1355 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1356 			 const struct wmi_sta_keepalive_arg *arg)
1357 {
1358 	struct sk_buff *skb;
1359 	u32 cmd_id;
1360 
1361 	if (!ar->wmi.ops->gen_sta_keepalive)
1362 		return -EOPNOTSUPP;
1363 
1364 	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1365 	if (IS_ERR(skb))
1366 		return PTR_ERR(skb);
1367 
1368 	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1369 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1370 }
1371 
1372 static inline int
1373 ath10k_wmi_wow_enable(struct ath10k *ar)
1374 {
1375 	struct sk_buff *skb;
1376 	u32 cmd_id;
1377 
1378 	if (!ar->wmi.ops->gen_wow_enable)
1379 		return -EOPNOTSUPP;
1380 
1381 	skb = ar->wmi.ops->gen_wow_enable(ar);
1382 	if (IS_ERR(skb))
1383 		return PTR_ERR(skb);
1384 
1385 	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1386 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1387 }
1388 
1389 static inline int
1390 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1391 				enum wmi_wow_wakeup_event event,
1392 				u32 enable)
1393 {
1394 	struct sk_buff *skb;
1395 	u32 cmd_id;
1396 
1397 	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1398 		return -EOPNOTSUPP;
1399 
1400 	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1401 	if (IS_ERR(skb))
1402 		return PTR_ERR(skb);
1403 
1404 	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1405 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1406 }
1407 
1408 static inline int
1409 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1410 {
1411 	struct sk_buff *skb;
1412 	u32 cmd_id;
1413 
1414 	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1415 		return -EOPNOTSUPP;
1416 
1417 	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1418 	if (IS_ERR(skb))
1419 		return PTR_ERR(skb);
1420 
1421 	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1422 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1423 }
1424 
1425 static inline int
1426 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1427 			   const u8 *pattern, const u8 *mask,
1428 			   int pattern_len, int pattern_offset)
1429 {
1430 	struct sk_buff *skb;
1431 	u32 cmd_id;
1432 
1433 	if (!ar->wmi.ops->gen_wow_add_pattern)
1434 		return -EOPNOTSUPP;
1435 
1436 	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1437 					       pattern, mask, pattern_len,
1438 					       pattern_offset);
1439 	if (IS_ERR(skb))
1440 		return PTR_ERR(skb);
1441 
1442 	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1443 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1444 }
1445 
1446 static inline int
1447 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1448 {
1449 	struct sk_buff *skb;
1450 	u32 cmd_id;
1451 
1452 	if (!ar->wmi.ops->gen_wow_del_pattern)
1453 		return -EOPNOTSUPP;
1454 
1455 	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1456 	if (IS_ERR(skb))
1457 		return PTR_ERR(skb);
1458 
1459 	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1460 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1461 }
1462 
1463 static inline int
1464 ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
1465 			  struct wmi_pno_scan_req *pno_scan)
1466 {
1467 	struct sk_buff *skb;
1468 	u32 cmd_id;
1469 
1470 	if (!ar->wmi.ops->gen_wow_config_pno)
1471 		return -EOPNOTSUPP;
1472 
1473 	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
1474 	if (IS_ERR(skb))
1475 		return PTR_ERR(skb);
1476 
1477 	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
1478 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1479 }
1480 
1481 static inline int
1482 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1483 				enum wmi_tdls_state state)
1484 {
1485 	struct sk_buff *skb;
1486 
1487 	if (!ar->wmi.ops->gen_update_fw_tdls_state)
1488 		return -EOPNOTSUPP;
1489 
1490 	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1491 	if (IS_ERR(skb))
1492 		return PTR_ERR(skb);
1493 
1494 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1495 }
1496 
1497 static inline int
1498 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1499 			    const struct wmi_tdls_peer_update_cmd_arg *arg,
1500 			    const struct wmi_tdls_peer_capab_arg *cap,
1501 			    const struct wmi_channel_arg *chan)
1502 {
1503 	struct sk_buff *skb;
1504 
1505 	if (!ar->wmi.ops->gen_tdls_peer_update)
1506 		return -EOPNOTSUPP;
1507 
1508 	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1509 	if (IS_ERR(skb))
1510 		return PTR_ERR(skb);
1511 
1512 	return ath10k_wmi_cmd_send(ar, skb,
1513 				   ar->wmi.cmd->tdls_peer_update_cmdid);
1514 }
1515 
1516 static inline int
1517 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1518 {
1519 	struct sk_buff *skb;
1520 
1521 	if (!ar->wmi.ops->gen_adaptive_qcs)
1522 		return -EOPNOTSUPP;
1523 
1524 	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1525 	if (IS_ERR(skb))
1526 		return PTR_ERR(skb);
1527 
1528 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1529 }
1530 
1531 static inline int
1532 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1533 {
1534 	struct sk_buff *skb;
1535 
1536 	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1537 		return -EOPNOTSUPP;
1538 
1539 	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1540 
1541 	if (IS_ERR(skb))
1542 		return PTR_ERR(skb);
1543 
1544 	return ath10k_wmi_cmd_send(ar, skb,
1545 				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1546 }
1547 
1548 static inline int
1549 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1550 			 char *buf)
1551 {
1552 	if (!ar->wmi.ops->fw_stats_fill)
1553 		return -EOPNOTSUPP;
1554 
1555 	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1556 	return 0;
1557 }
1558 
1559 static inline int
1560 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1561 				    u32 detect_level, u32 detect_margin)
1562 {
1563 	struct sk_buff *skb;
1564 
1565 	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1566 		return -EOPNOTSUPP;
1567 
1568 	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1569 							detect_level,
1570 							detect_margin);
1571 
1572 	if (IS_ERR(skb))
1573 		return PTR_ERR(skb);
1574 
1575 	return ath10k_wmi_cmd_send(ar, skb,
1576 				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1577 }
1578 
1579 static inline int
1580 ath10k_wmi_ext_resource_config(struct ath10k *ar,
1581 			       enum wmi_host_platform_type type,
1582 			       u32 fw_feature_bitmap)
1583 {
1584 	struct sk_buff *skb;
1585 
1586 	if (!ar->wmi.ops->ext_resource_config)
1587 		return -EOPNOTSUPP;
1588 
1589 	skb = ar->wmi.ops->ext_resource_config(ar, type,
1590 					       fw_feature_bitmap);
1591 
1592 	if (IS_ERR(skb))
1593 		return PTR_ERR(skb);
1594 
1595 	return ath10k_wmi_cmd_send(ar, skb,
1596 				   ar->wmi.cmd->ext_resource_cfg_cmdid);
1597 }
1598 
1599 static inline int
1600 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1601 {
1602 	if (!ar->wmi.ops->get_vdev_subtype)
1603 		return -EOPNOTSUPP;
1604 
1605 	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1606 }
1607 
1608 static inline int
1609 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1610 				      enum wmi_bss_survey_req_type type)
1611 {
1612 	struct ath10k_wmi *wmi = &ar->wmi;
1613 	struct sk_buff *skb;
1614 
1615 	if (!wmi->ops->gen_pdev_bss_chan_info_req)
1616 		return -EOPNOTSUPP;
1617 
1618 	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1619 	if (IS_ERR(skb))
1620 		return PTR_ERR(skb);
1621 
1622 	return ath10k_wmi_cmd_send(ar, skb,
1623 				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
1624 }
1625 
1626 static inline int
1627 ath10k_wmi_echo(struct ath10k *ar, u32 value)
1628 {
1629 	struct ath10k_wmi *wmi = &ar->wmi;
1630 	struct sk_buff *skb;
1631 
1632 	if (!wmi->ops->gen_echo)
1633 		return -EOPNOTSUPP;
1634 
1635 	skb = wmi->ops->gen_echo(ar, value);
1636 	if (IS_ERR(skb))
1637 		return PTR_ERR(skb);
1638 
1639 	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1640 }
1641 
1642 static inline int
1643 ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1644 {
1645 	struct sk_buff *skb;
1646 
1647 	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1648 		return -EOPNOTSUPP;
1649 
1650 	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1651 
1652 	if (IS_ERR(skb))
1653 		return PTR_ERR(skb);
1654 
1655 	return ath10k_wmi_cmd_send(ar, skb,
1656 				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1657 }
1658 
1659 static inline int
1660 ath10k_wmi_report_radar_found(struct ath10k *ar,
1661 			      const struct ath10k_radar_found_info *arg)
1662 {
1663 	struct sk_buff *skb;
1664 
1665 	if (!ar->wmi.ops->gen_radar_found)
1666 		return -EOPNOTSUPP;
1667 
1668 	skb = ar->wmi.ops->gen_radar_found(ar, arg);
1669 	if (IS_ERR(skb))
1670 		return PTR_ERR(skb);
1671 
1672 	return ath10k_wmi_cmd_send(ar, skb,
1673 				   ar->wmi.cmd->radar_found_cmdid);
1674 }
1675 
1676 static inline int
1677 ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
1678 			  const struct wmi_bb_timing_cfg_arg *arg)
1679 {
1680 	struct sk_buff *skb;
1681 
1682 	if (!ar->wmi.ops->gen_bb_timing)
1683 		return -EOPNOTSUPP;
1684 
1685 	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
1686 
1687 	if (IS_ERR(skb))
1688 		return PTR_ERR(skb);
1689 
1690 	return ath10k_wmi_cmd_send(ar, skb,
1691 				   ar->wmi.cmd->set_bb_timing_cmdid);
1692 }
1693 
1694 static inline int
1695 ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
1696 				    const struct wmi_per_peer_per_tid_cfg_arg *arg)
1697 {
1698 	struct sk_buff *skb;
1699 
1700 	if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
1701 		return -EOPNOTSUPP;
1702 
1703 	skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
1704 	if (IS_ERR(skb))
1705 		return PTR_ERR(skb);
1706 
1707 	return ath10k_wmi_cmd_send(ar, skb,
1708 				   ar->wmi.cmd->per_peer_per_tid_config_cmdid);
1709 }
1710 #endif
1711