xref: /linux/drivers/net/wireless/ath/ath10k/wmi.c (revision ff5599816711d2e67da2d7561fd36ac48debd433)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/skbuff.h>
19 
20 #include "core.h"
21 #include "htc.h"
22 #include "debug.h"
23 #include "wmi.h"
24 #include "mac.h"
25 
26 void ath10k_wmi_flush_tx(struct ath10k *ar)
27 {
28 	int ret;
29 
30 	ret = wait_event_timeout(ar->wmi.wq,
31 				 atomic_read(&ar->wmi.pending_tx_count) == 0,
32 				 5*HZ);
33 	if (atomic_read(&ar->wmi.pending_tx_count) == 0)
34 		return;
35 
36 	if (ret == 0)
37 		ret = -ETIMEDOUT;
38 
39 	if (ret < 0)
40 		ath10k_warn("wmi flush failed (%d)\n", ret);
41 }
42 
43 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
44 {
45 	int ret;
46 	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
47 					  WMI_SERVICE_READY_TIMEOUT_HZ);
48 	return ret;
49 }
50 
51 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
52 {
53 	int ret;
54 	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
55 					  WMI_UNIFIED_READY_TIMEOUT_HZ);
56 	return ret;
57 }
58 
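/*
 * Note: WMI payloads are 32-bit aligned on the wire, hence the roundup()
 * below. The reserved headroom is where ath10k_wmi_cmd_send() later
 * pushes the WMI command header; HTC accounts for its own header room
 * in ath10k_htc_alloc_skb().
 */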
59 static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
60 {
61 	struct sk_buff *skb;
62 	u32 round_len = roundup(len, 4);
63 
64 	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
65 	if (!skb)
66 		return NULL;
67 
68 	skb_reserve(skb, WMI_SKB_HEADROOM);
69 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
70 		ath10k_warn("Unaligned WMI skb\n");
71 
72 	skb_put(skb, round_len);
73 	memset(skb->data, 0, round_len);
74 
75 	return skb;
76 }
77 
78 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
79 {
80 	dev_kfree_skb(skb);
81 
82 	if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
83 		wake_up(&ar->wmi.wq);
84 }
85 
86 /* WMI command API */
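/*
 * ath10k_wmi_cmd_send() prepends a wmi_cmd_hdr carrying the command id,
 * caps the number of in-flight commands (so a hung firmware cannot make
 * the host queue up unbounded amounts of memory) and hands the buffer to
 * HTC. The skb is consumed on both the success and the error path.
 */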
87 static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
88 			       enum wmi_cmd_id cmd_id)
89 {
90 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
91 	struct wmi_cmd_hdr *cmd_hdr;
92 	int status;
93 	u32 cmd = 0;
94 
95 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
96 		return -ENOMEM;
97 
98 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
99 
100 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
101 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
102 
103 	if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
104 	    WMI_MAX_PENDING_TX_COUNT) {
105 		/* avoid using up memory when FW hangs */
106 		atomic_dec(&ar->wmi.pending_tx_count);
107 		return -EBUSY;
108 	}
109 
110 	memset(skb_cb, 0, sizeof(*skb_cb));
111 
112 	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
113 
114 	status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
115 	if (status) {
116 		dev_kfree_skb_any(skb);
117 		atomic_dec(&ar->wmi.pending_tx_count);
118 		return status;
119 	}
120 
121 	return 0;
122 }
123 
124 static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
125 {
126 	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
127 	enum wmi_scan_event_type event_type;
128 	enum wmi_scan_completion_reason reason;
129 	u32 freq;
130 	u32 req_id;
131 	u32 scan_id;
132 	u32 vdev_id;
133 
134 	event_type = __le32_to_cpu(event->event_type);
135 	reason     = __le32_to_cpu(event->reason);
136 	freq       = __le32_to_cpu(event->channel_freq);
137 	req_id     = __le32_to_cpu(event->scan_req_id);
138 	scan_id    = __le32_to_cpu(event->scan_id);
139 	vdev_id    = __le32_to_cpu(event->vdev_id);
140 
141 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
142 	ath10k_dbg(ATH10K_DBG_WMI,
143 		   "scan event type %d reason %d freq %d req_id %d "
144 		   "scan_id %d vdev_id %d\n",
145 		   event_type, reason, freq, req_id, scan_id, vdev_id);
146 
147 	spin_lock_bh(&ar->data_lock);
148 
149 	switch (event_type) {
150 	case WMI_SCAN_EVENT_STARTED:
151 		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
152 		if (ar->scan.in_progress && ar->scan.is_roc)
153 			ieee80211_ready_on_channel(ar->hw);
154 
155 		complete(&ar->scan.started);
156 		break;
157 	case WMI_SCAN_EVENT_COMPLETED:
158 		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
159 		switch (reason) {
160 		case WMI_SCAN_REASON_COMPLETED:
161 			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
162 			break;
163 		case WMI_SCAN_REASON_CANCELLED:
164 			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELLED\n");
165 			break;
166 		case WMI_SCAN_REASON_PREEMPTED:
167 			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
168 			break;
169 		case WMI_SCAN_REASON_TIMEDOUT:
170 			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
171 			break;
172 		default:
173 			break;
174 		}
175 
176 		ar->scan_channel = NULL;
177 		if (!ar->scan.in_progress) {
178 			ath10k_warn("no scan requested, ignoring\n");
179 			break;
180 		}
181 
182 		if (ar->scan.is_roc) {
183 			ath10k_offchan_tx_purge(ar);
184 
185 			if (!ar->scan.aborting)
186 				ieee80211_remain_on_channel_expired(ar->hw);
187 		} else {
188 			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
189 		}
190 
191 		del_timer(&ar->scan.timeout);
192 		complete_all(&ar->scan.completed);
193 		ar->scan.in_progress = false;
194 		break;
195 	case WMI_SCAN_EVENT_BSS_CHANNEL:
196 		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
197 		ar->scan_channel = NULL;
198 		break;
199 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
200 		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
201 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
202 		if (ar->scan.in_progress && ar->scan.is_roc &&
203 		    ar->scan.roc_freq == freq) {
204 			complete(&ar->scan.on_channel);
205 		}
206 		break;
207 	case WMI_SCAN_EVENT_DEQUEUED:
208 		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
209 		break;
210 	case WMI_SCAN_EVENT_PREEMPTED:
211 		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
212 		break;
213 	case WMI_SCAN_EVENT_START_FAILED:
214 		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
215 		break;
216 	default:
217 		break;
218 	}
219 
220 	spin_unlock_bh(&ar->data_lock);
221 	return 0;
222 }
223 
224 static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
225 {
226 	enum ieee80211_band band;
227 
228 	switch (phy_mode) {
229 	case MODE_11A:
230 	case MODE_11NA_HT20:
231 	case MODE_11NA_HT40:
232 	case MODE_11AC_VHT20:
233 	case MODE_11AC_VHT40:
234 	case MODE_11AC_VHT80:
235 		band = IEEE80211_BAND_5GHZ;
236 		break;
237 	case MODE_11G:
238 	case MODE_11B:
239 	case MODE_11GONLY:
240 	case MODE_11NG_HT20:
241 	case MODE_11NG_HT40:
242 	case MODE_11AC_VHT20_2G:
243 	case MODE_11AC_VHT40_2G:
244 	case MODE_11AC_VHT80_2G:
245 	default:
246 		band = IEEE80211_BAND_2GHZ;
247 	}
248 
249 	return band;
250 }
251 
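/*
 * Map a firmware-reported rate in kbps onto mac80211's rate index:
 * indices 0-3 are the CCK rates (1, 2, 5.5, 11 Mbps), 4-11 the OFDM
 * rates (6-54 Mbps). The 5 GHz band has no CCK entries, so the index
 * is shifted down by four there.
 */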
252 static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
253 {
254 	u8 rate_idx = 0;
255 
256 	/* rate in Kbps */
257 	switch (rate) {
258 	case 1000:
259 		rate_idx = 0;
260 		break;
261 	case 2000:
262 		rate_idx = 1;
263 		break;
264 	case 5500:
265 		rate_idx = 2;
266 		break;
267 	case 11000:
268 		rate_idx = 3;
269 		break;
270 	case 6000:
271 		rate_idx = 4;
272 		break;
273 	case 9000:
274 		rate_idx = 5;
275 		break;
276 	case 12000:
277 		rate_idx = 6;
278 		break;
279 	case 18000:
280 		rate_idx = 7;
281 		break;
282 	case 24000:
283 		rate_idx = 8;
284 		break;
285 	case 36000:
286 		rate_idx = 9;
287 		break;
288 	case 48000:
289 		rate_idx = 10;
290 		break;
291 	case 54000:
292 		rate_idx = 11;
293 		break;
294 	default:
295 		break;
296 	}
297 
298 	if (band == IEEE80211_BAND_5GHZ) {
299 		if (rate_idx > 3)
300 			/* Omit CCK rates */
301 			rate_idx -= 4;
302 		else
303 			rate_idx = 0;
304 	}
305 
306 	return rate_idx;
307 }
308 
309 static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
310 {
311 	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
312 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
313 	struct ieee80211_hdr *hdr;
314 	u32 rx_status;
315 	u32 channel;
316 	u32 phy_mode;
317 	u32 snr;
318 	u32 rate;
319 	u32 buf_len;
320 	u16 fc;
321 
322 	channel   = __le32_to_cpu(event->hdr.channel);
323 	buf_len   = __le32_to_cpu(event->hdr.buf_len);
324 	rx_status = __le32_to_cpu(event->hdr.status);
325 	snr       = __le32_to_cpu(event->hdr.snr);
326 	phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
327 	rate	  = __le32_to_cpu(event->hdr.rate);
328 
329 	memset(status, 0, sizeof(*status));
330 
331 	ath10k_dbg(ATH10K_DBG_MGMT,
332 		   "event mgmt rx status %08x\n", rx_status);
333 
334 	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
335 		dev_kfree_skb(skb);
336 		return 0;
337 	}
338 
339 	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
340 		dev_kfree_skb(skb);
341 		return 0;
342 	}
343 
344 	if (rx_status & WMI_RX_STATUS_ERR_CRC)
345 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
346 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
347 		status->flag |= RX_FLAG_MMIC_ERROR;
348 
349 	status->band = phy_mode_to_band(phy_mode);
350 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
351 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
352 	status->rate_idx = get_rate_idx(rate, status->band);
353 
354 	skb_pull(skb, sizeof(event->hdr));
355 
356 	hdr = (struct ieee80211_hdr *)skb->data;
357 	fc = le16_to_cpu(hdr->frame_control);
358 
359 	if (fc & IEEE80211_FCTL_PROTECTED) {
360 		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
361 				RX_FLAG_MMIC_STRIPPED;
362 		hdr->frame_control = __cpu_to_le16(fc &
363 					~IEEE80211_FCTL_PROTECTED);
364 	}
365 
366 	ath10k_dbg(ATH10K_DBG_MGMT,
367 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
368 		   skb, skb->len,
369 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
370 
371 	ath10k_dbg(ATH10K_DBG_MGMT,
372 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
373 		   status->freq, status->band, status->signal,
374 		   status->rate_idx);
375 
376 	/*
377 	 * Packets from HTC arrive aligned to 4-byte boundaries
378 	 * because they may originally carry a trailer.
379 	 */
380 	skb_trim(skb, buf_len);
381 
382 	ieee80211_rx(ar->hw, skb);
383 	return 0;
384 }
385 
386 static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
387 {
388 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
389 }
390 
391 static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
392 {
393 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
394 }
395 
396 static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
397 {
398 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
399 }
400 
401 static void ath10k_wmi_event_update_stats(struct ath10k *ar,
402 					  struct sk_buff *skb)
403 {
404 	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
405 
406 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
407 
408 	ath10k_debug_read_target_stats(ar, ev);
409 }
410 
411 static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
412 					     struct sk_buff *skb)
413 {
414 	struct wmi_vdev_start_response_event *ev;
415 
416 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
417 
418 	ev = (struct wmi_vdev_start_response_event *)skb->data;
419 
420 	if (WARN_ON(__le32_to_cpu(ev->status)))
421 		return;
422 
423 	complete(&ar->vdev_setup_done);
424 }
425 
426 static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
427 					  struct sk_buff *skb)
428 {
429 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
430 	complete(&ar->vdev_setup_done);
431 }
432 
433 static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
434 					      struct sk_buff *skb)
435 {
436 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
437 }
438 
439 /*
440  * FIXME
441  *
442  * We don't report to mac80211 sleep state of connected
443  * stations. Due to this mac80211 can't fill in TIM IE
444  * correctly.
445  *
446  * I know of no way of getting nullfunc frames that contain
447  * sleep transition from connected stations - these do not
448  * seem to be sent from the target to the host. There also
449  * doesn't seem to be a dedicated event for that. So the
450  * only way left to do this would be to read tim_bitmap
451  * during SWBA.
452  *
453  * We could probably try using tim_bitmap from SWBA to tell
454  * mac80211 which stations are asleep and which are not. The
455  * problem here is calling mac80211 functions so many times
456  * could take too long and make us miss the time to submit
457  * the beacon to the target.
458  *
459  * So as a workaround we try to extend the TIM IE if there
460  * is unicast buffered for stations with aid > 7 and fill it
461  * in ourselves.
462  */
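/*
 * For reference, the IEEE 802.11 TIM element that is patched below is
 * laid out as: element id, length, DTIM count, DTIM period, bitmap
 * control, and finally the partial virtual bitmap (1-251 octets).
 */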
463 static void ath10k_wmi_update_tim(struct ath10k *ar,
464 				  struct ath10k_vif *arvif,
465 				  struct sk_buff *bcn,
466 				  struct wmi_bcn_info *bcn_info)
467 {
468 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
469 	struct ieee80211_tim_ie *tim;
470 	u8 *ies, *ie;
471 	u8 ie_len, pvm_len;
472 
473 	/* If the next SWBA event has tim_changed cleared, the tim_bitmap is
474 	 * garbage; we must copy the bitmap when it changes and reuse it later. */
475 	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
476 		int i;
477 
478 		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
479 			     sizeof(bcn_info->tim_info.tim_bitmap));
480 
481 		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
482 			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
483 			u32 v = __le32_to_cpu(t);
484 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
485 		}
486 
487 		/* FW reports a length of either 0 or 16,
488 		 * so we calculate the actual TIM length on our own */
489 		arvif->u.ap.tim_len = 0;
490 		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
491 			if (arvif->u.ap.tim_bitmap[i])
492 				arvif->u.ap.tim_len = i;
493 
494 		arvif->u.ap.tim_len++;
495 	}
496 
497 	ies = bcn->data;
498 	ies += ieee80211_hdrlen(hdr->frame_control);
499 	ies += 12; /* fixed parameters */
500 
501 	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
502 				    (u8 *)skb_tail_pointer(bcn) - ies);
503 	if (!ie) {
504 		/* highly unlikely for mac80211 */
505 		ath10k_warn("no tim ie found\n");
506 		return;
507 	}
508 
509 	tim = (void *)ie + 2;
510 	ie_len = ie[1];
511 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
512 
513 	if (pvm_len < arvif->u.ap.tim_len) {
514 		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
515 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
516 		void *next_ie = ie + 2 + ie_len;
517 
518 		if (skb_put(bcn, expand_size)) {
519 			memmove(next_ie + expand_size, next_ie, move_size);
520 
521 			ie[1] += expand_size;
522 			ie_len += expand_size;
523 			pvm_len += expand_size;
524 		} else {
525 			ath10k_warn("tim expansion failed\n");
526 		}
527 	}
528 
529 	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
530 		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
531 		return;
532 	}
533 
534 	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
535 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
536 
537 	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
538 		   tim->dtim_count, tim->dtim_period,
539 		   tim->bitmap_ctrl, pvm_len);
540 }
541 
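/*
 * Build a vendor-specific IE carrying the WFA P2P OUI followed by a
 * Notice of Absence attribute: attribute id, 2-byte little-endian
 * length, index/CTWindow octet and the NoA descriptors.
 * ath10k_p2p_calc_noa_ie_len() below must stay in sync with this layout.
 */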
542 static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
543 				   struct wmi_p2p_noa_info *noa)
544 {
545 	struct ieee80211_p2p_noa_attr *noa_attr;
546 	u8 ctwindow_oppps = noa->ctwindow_oppps;
547 	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
548 	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
549 	__le16 *noa_attr_len;
550 	u16 attr_len;
551 	u8 noa_descriptors = noa->num_descriptors;
552 	int i;
553 
554 	/* P2P IE */
555 	data[0] = WLAN_EID_VENDOR_SPECIFIC;
556 	data[1] = len - 2;
557 	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
558 	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
559 	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
560 	data[5] = WLAN_OUI_TYPE_WFA_P2P;
561 
562 	/* NOA ATTR */
563 	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
564 	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
565 	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
566 
567 	noa_attr->index = noa->index;
568 	noa_attr->oppps_ctwindow = ctwindow;
569 	if (oppps)
570 		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
571 
572 	for (i = 0; i < noa_descriptors; i++) {
573 		noa_attr->desc[i].count =
574 			__le32_to_cpu(noa->descriptors[i].type_count);
575 		noa_attr->desc[i].duration = noa->descriptors[i].duration;
576 		noa_attr->desc[i].interval = noa->descriptors[i].interval;
577 		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
578 	}
579 
580 	attr_len = 2; /* index + oppps_ctwindow */
581 	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
582 	*noa_attr_len = __cpu_to_le16(attr_len);
583 }
584 
585 static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
586 {
587 	u32 len = 0;
588 	u8 noa_descriptors = noa->num_descriptors;
589 	u8 opp_ps_info = noa->ctwindow_oppps;
590 	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
591 
593 	if (!noa_descriptors && !opps_enabled)
594 		return len;
595 
596 	len += 1 + 1 + 4; /* EID + len + OUI */
597 	len += 1 + 2; /* noa attr  + attr len */
598 	len += 1 + 1; /* index + oppps_ctwindow */
599 	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
600 
601 	return len;
602 }
603 
604 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
605 				  struct sk_buff *bcn,
606 				  struct wmi_bcn_info *bcn_info)
607 {
608 	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
609 	u8 *new_data, *old_data = arvif->u.ap.noa_data;
610 	u32 new_len;
611 
612 	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
613 		return;
614 
615 	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
616 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
617 		new_len = ath10k_p2p_calc_noa_ie_len(noa);
618 		if (!new_len)
619 			goto cleanup;
620 
621 		new_data = kmalloc(new_len, GFP_ATOMIC);
622 		if (!new_data)
623 			goto cleanup;
624 
625 		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
626 
627 		spin_lock_bh(&ar->data_lock);
628 		arvif->u.ap.noa_data = new_data;
629 		arvif->u.ap.noa_len = new_len;
630 		spin_unlock_bh(&ar->data_lock);
631 		kfree(old_data);
632 	}
633 
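	/* Append the (possibly updated) NoA IE to this beacon;
	 * pskb_expand_head() makes room at the tail first. */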
634 	if (arvif->u.ap.noa_data)
635 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
636 			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
637 			       arvif->u.ap.noa_data,
638 			       arvif->u.ap.noa_len);
639 	return;
640 
641 cleanup:
642 	spin_lock_bh(&ar->data_lock);
643 	arvif->u.ap.noa_data = NULL;
644 	arvif->u.ap.noa_len = 0;
645 	spin_unlock_bh(&ar->data_lock);
646 	kfree(old_data);
647 }
648 
649 
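/*
 * SWBA (software beacon alert) fires once per beacon interval for every
 * beaconing vdev in the bitmap. The host must fetch a fresh beacon from
 * mac80211, patch in the TIM and NoA state and submit it to the target
 * before the next TBTT.
 */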
650 static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
651 {
652 	struct wmi_host_swba_event *ev;
653 	u32 map;
654 	int i = -1;
655 	struct wmi_bcn_info *bcn_info;
656 	struct ath10k_vif *arvif;
657 	struct wmi_bcn_tx_arg arg;
658 	struct sk_buff *bcn;
659 	int vdev_id = 0;
660 	int ret;
661 
662 	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
663 
664 	ev = (struct wmi_host_swba_event *)skb->data;
665 	map = __le32_to_cpu(ev->vdev_map);
666 
667 	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
668 		   "-vdev map 0x%x\n",
669 		   map);
670 
671 	for (; map; map >>= 1, vdev_id++) {
672 		if (!(map & 0x1))
673 			continue;
674 
675 		i++;
676 
677 		if (i >= WMI_MAX_AP_VDEV) {
678 			ath10k_warn("swba has corrupted vdev map\n");
679 			break;
680 		}
681 
682 		bcn_info = &ev->bcn_info[i];
683 
684 		ath10k_dbg(ATH10K_DBG_MGMT,
685 			   "-bcn_info[%d]:\n"
686 			   "--tim_len %d\n"
687 			   "--tim_mcast %d\n"
688 			   "--tim_changed %d\n"
689 			   "--tim_num_ps_pending %d\n"
690 			   "--tim_bitmap 0x%08x%08x%08x%08x\n",
691 			   i,
692 			   __le32_to_cpu(bcn_info->tim_info.tim_len),
693 			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
694 			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
695 			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
696 			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
697 			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
698 			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
699 			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
700 
701 		arvif = ath10k_get_arvif(ar, vdev_id);
702 		if (arvif == NULL) {
703 			ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
704 			continue;
705 		}
706 
707 		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
708 		if (!bcn) {
709 			ath10k_warn("could not get mac80211 beacon\n");
710 			continue;
711 		}
712 
713 		ath10k_tx_h_seq_no(bcn);
714 		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
715 		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
716 
717 		arg.vdev_id = arvif->vdev_id;
718 		arg.tx_rate = 0;
719 		arg.tx_power = 0;
720 		arg.bcn = bcn->data;
721 		arg.bcn_len = bcn->len;
722 
723 		ret = ath10k_wmi_beacon_send(ar, &arg);
724 		if (ret)
725 			ath10k_warn("could not send beacon (%d)\n", ret);
726 
727 		dev_kfree_skb_any(bcn);
728 	}
729 }
730 
731 static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
732 					       struct sk_buff *skb)
733 {
734 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
735 }
736 
737 static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
738 {
739 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
740 }
741 
742 static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
743 {
744 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
745 }
746 
747 static void ath10k_wmi_event_profile_match(struct ath10k *ar,
748 				    struct sk_buff *skb)
749 {
750 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
751 }
752 
753 static void ath10k_wmi_event_debug_print(struct ath10k *ar,
754 				  struct sk_buff *skb)
755 {
756 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
757 }
758 
759 static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
760 {
761 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
762 }
763 
764 static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
765 					       struct sk_buff *skb)
766 {
767 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
768 }
769 
770 static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
771 					     struct sk_buff *skb)
772 {
773 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
774 }
775 
776 static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
777 					     struct sk_buff *skb)
778 {
779 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
780 }
781 
782 static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
783 					      struct sk_buff *skb)
784 {
785 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
786 }
787 
788 static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
789 					     struct sk_buff *skb)
790 {
791 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
792 }
793 
794 static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
795 					      struct sk_buff *skb)
796 {
797 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
798 }
799 
800 static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
801 					     struct sk_buff *skb)
802 {
803 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
804 }
805 
806 static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
807 					   struct sk_buff *skb)
808 {
809 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
810 }
811 
812 static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
813 					 struct sk_buff *skb)
814 {
815 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
816 }
817 
818 static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
819 					    struct sk_buff *skb)
820 {
821 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
822 }
823 
824 static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
825 					    struct sk_buff *skb)
826 {
827 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
828 }
829 
830 static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
831 					    struct sk_buff *skb)
832 {
833 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
834 }
835 
836 static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
837 						struct sk_buff *skb)
838 {
839 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
840 }
841 
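/*
 * The service ready event is the firmware's first message after boot: it
 * advertises capabilities, the regulatory EEPROM word and the firmware
 * version, packed as major/minor in sw_version and release/build in
 * sw_version_1.
 */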
842 static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
843 					      struct sk_buff *skb)
844 {
845 	struct wmi_service_ready_event *ev = (void *)skb->data;
846 
847 	if (skb->len < sizeof(*ev)) {
848 		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
849 			    skb->len, sizeof(*ev));
850 		return;
851 	}
852 
853 	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
854 	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
855 	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
856 	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
857 	ar->fw_version_major =
858 		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
859 	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
860 	ar->fw_version_release =
861 		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
862 	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
863 	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
864 
865 	ar->ath_common.regulatory.current_rd =
866 		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
867 
868 	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
869 				      sizeof(ev->wmi_service_bitmap));
870 
871 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
872 		snprintf(ar->hw->wiphy->fw_version,
873 			 sizeof(ar->hw->wiphy->fw_version),
874 			 "%u.%u.%u.%u",
875 			 ar->fw_version_major,
876 			 ar->fw_version_minor,
877 			 ar->fw_version_release,
878 			 ar->fw_version_build);
879 	}
880 
881 	/* FIXME: it would probably be better to support this */
882 	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
883 		ath10k_warn("target requested %d memory chunks; ignoring\n",
884 			    __le32_to_cpu(ev->num_mem_reqs));
885 	}
886 
887 	ath10k_dbg(ATH10K_DBG_WMI,
888 		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
889 		   __le32_to_cpu(ev->sw_version),
890 		   __le32_to_cpu(ev->sw_version_1),
891 		   __le32_to_cpu(ev->abi_version),
892 		   __le32_to_cpu(ev->phy_capability),
893 		   __le32_to_cpu(ev->ht_cap_info),
894 		   __le32_to_cpu(ev->vht_cap_info),
895 		   __le32_to_cpu(ev->vht_supp_mcs),
896 		   __le32_to_cpu(ev->sys_cap_info),
897 		   __le32_to_cpu(ev->num_mem_reqs));
898 
899 	complete(&ar->wmi.service_ready);
900 }
901 
902 static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
903 {
904 	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
905 
906 	if (WARN_ON(skb->len < sizeof(*ev)))
907 		return -EINVAL;
908 
909 	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
910 
911 	ath10k_dbg(ATH10K_DBG_WMI,
912 		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
913 		   __le32_to_cpu(ev->sw_version),
914 		   __le32_to_cpu(ev->abi_version),
915 		   ev->mac_addr.addr,
916 		   __le32_to_cpu(ev->status));
917 
918 	complete(&ar->wmi.unified_ready);
919 	return 0;
920 }
921 
922 static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
923 {
924 	struct wmi_cmd_hdr *cmd_hdr;
925 	enum wmi_event_id id;
926 	u16 len;
927 
928 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
929 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
930 
931 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
932 		return;
933 
934 	len = skb->len;
935 
936 	trace_ath10k_wmi_event(id, skb->data, skb->len);
937 
938 	switch (id) {
939 	case WMI_MGMT_RX_EVENTID:
940 		ath10k_wmi_event_mgmt_rx(ar, skb);
941 		/* mgmt_rx() owns the skb now! */
942 		return;
943 	case WMI_SCAN_EVENTID:
944 		ath10k_wmi_event_scan(ar, skb);
945 		break;
946 	case WMI_CHAN_INFO_EVENTID:
947 		ath10k_wmi_event_chan_info(ar, skb);
948 		break;
949 	case WMI_ECHO_EVENTID:
950 		ath10k_wmi_event_echo(ar, skb);
951 		break;
952 	case WMI_DEBUG_MESG_EVENTID:
953 		ath10k_wmi_event_debug_mesg(ar, skb);
954 		break;
955 	case WMI_UPDATE_STATS_EVENTID:
956 		ath10k_wmi_event_update_stats(ar, skb);
957 		break;
958 	case WMI_VDEV_START_RESP_EVENTID:
959 		ath10k_wmi_event_vdev_start_resp(ar, skb);
960 		break;
961 	case WMI_VDEV_STOPPED_EVENTID:
962 		ath10k_wmi_event_vdev_stopped(ar, skb);
963 		break;
964 	case WMI_PEER_STA_KICKOUT_EVENTID:
965 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
966 		break;
967 	case WMI_HOST_SWBA_EVENTID:
968 		ath10k_wmi_event_host_swba(ar, skb);
969 		break;
970 	case WMI_TBTTOFFSET_UPDATE_EVENTID:
971 		ath10k_wmi_event_tbttoffset_update(ar, skb);
972 		break;
973 	case WMI_PHYERR_EVENTID:
974 		ath10k_wmi_event_phyerr(ar, skb);
975 		break;
976 	case WMI_ROAM_EVENTID:
977 		ath10k_wmi_event_roam(ar, skb);
978 		break;
979 	case WMI_PROFILE_MATCH:
980 		ath10k_wmi_event_profile_match(ar, skb);
981 		break;
982 	case WMI_DEBUG_PRINT_EVENTID:
983 		ath10k_wmi_event_debug_print(ar, skb);
984 		break;
985 	case WMI_PDEV_QVIT_EVENTID:
986 		ath10k_wmi_event_pdev_qvit(ar, skb);
987 		break;
988 	case WMI_WLAN_PROFILE_DATA_EVENTID:
989 		ath10k_wmi_event_wlan_profile_data(ar, skb);
990 		break;
991 	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
992 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
993 		break;
994 	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
995 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
996 		break;
997 	case WMI_RTT_ERROR_REPORT_EVENTID:
998 		ath10k_wmi_event_rtt_error_report(ar, skb);
999 		break;
1000 	case WMI_WOW_WAKEUP_HOST_EVENTID:
1001 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
1002 		break;
1003 	case WMI_DCS_INTERFERENCE_EVENTID:
1004 		ath10k_wmi_event_dcs_interference(ar, skb);
1005 		break;
1006 	case WMI_PDEV_TPC_CONFIG_EVENTID:
1007 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
1008 		break;
1009 	case WMI_PDEV_FTM_INTG_EVENTID:
1010 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
1011 		break;
1012 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
1013 		ath10k_wmi_event_gtk_offload_status(ar, skb);
1014 		break;
1015 	case WMI_GTK_REKEY_FAIL_EVENTID:
1016 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
1017 		break;
1018 	case WMI_TX_DELBA_COMPLETE_EVENTID:
1019 		ath10k_wmi_event_delba_complete(ar, skb);
1020 		break;
1021 	case WMI_TX_ADDBA_COMPLETE_EVENTID:
1022 		ath10k_wmi_event_addba_complete(ar, skb);
1023 		break;
1024 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
1025 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
1026 		break;
1027 	case WMI_SERVICE_READY_EVENTID:
1028 		ath10k_wmi_service_ready_event_rx(ar, skb);
1029 		break;
1030 	case WMI_READY_EVENTID:
1031 		ath10k_wmi_ready_event_rx(ar, skb);
1032 		break;
1033 	default:
1034 		ath10k_warn("Unknown eventid: %d\n", id);
1035 		break;
1036 	}
1037 
1038 	dev_kfree_skb(skb);
1039 }
1040 
1041 static void ath10k_wmi_event_work(struct work_struct *work)
1042 {
1043 	struct ath10k *ar = container_of(work, struct ath10k,
1044 					 wmi.wmi_event_work);
1045 	struct sk_buff *skb;
1046 
1047 	for (;;) {
1048 		skb = skb_dequeue(&ar->wmi.wmi_event_list);
1049 		if (!skb)
1050 			break;
1051 
1052 		ath10k_wmi_event_process(ar, skb);
1053 	}
1054 }
1055 
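/*
 * RX dispatch is two-tiered: beacon (SWBA) and mgmt rx events are handled
 * directly in the HTC completion context, everything else is deferred to
 * a worker so that the RX path itself is never blocked for long.
 */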
1056 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
1057 {
1058 	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1059 	enum wmi_event_id event_id;
1060 
1061 	event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
1062 
1063 	/* Some events must be handled ASAP and thus
1064 	 * can't be deferred to a worker thread. */
1065 	switch (event_id) {
1066 	case WMI_HOST_SWBA_EVENTID:
1067 	case WMI_MGMT_RX_EVENTID:
1068 		ath10k_wmi_event_process(ar, skb);
1069 		return;
1070 	default:
1071 		break;
1072 	}
1073 
1074 	skb_queue_tail(&ar->wmi.wmi_event_list, skb);
1075 	queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
1076 }
1077 
1078 /* WMI Initialization functions */
1079 int ath10k_wmi_attach(struct ath10k *ar)
1080 {
1081 	init_completion(&ar->wmi.service_ready);
1082 	init_completion(&ar->wmi.unified_ready);
1083 	init_waitqueue_head(&ar->wmi.wq);
1084 
1085 	skb_queue_head_init(&ar->wmi.wmi_event_list);
1086 	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
1087 
1088 	return 0;
1089 }
1090 
1091 void ath10k_wmi_detach(struct ath10k *ar)
1092 {
1093 	/* HTC should've drained the packets already */
1094 	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
1095 		ath10k_warn("there are still pending packets\n");
1096 
1097 	cancel_work_sync(&ar->wmi.wmi_event_work);
1098 	skb_queue_purge(&ar->wmi.wmi_event_list);
1099 }
1100 
1101 int ath10k_wmi_connect_htc_service(struct ath10k *ar)
1102 {
1103 	int status;
1104 	struct ath10k_htc_svc_conn_req conn_req;
1105 	struct ath10k_htc_svc_conn_resp conn_resp;
1106 
1107 	memset(&conn_req, 0, sizeof(conn_req));
1108 	memset(&conn_resp, 0, sizeof(conn_resp));
1109 
1110 	/* these fields are the same for all service endpoints */
1111 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
1112 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
1113 
1114 	/* connect to control service */
1115 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
1116 
1117 	status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
1118 	if (status) {
1119 		ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
1120 			    status);
1121 		return status;
1122 	}
1123 
1124 	ar->wmi.eid = conn_resp.eid;
1125 	return 0;
1126 }
1127 
1128 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
1129 				  u16 rd5g, u16 ctl2g, u16 ctl5g)
1130 {
1131 	struct wmi_pdev_set_regdomain_cmd *cmd;
1132 	struct sk_buff *skb;
1133 
1134 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1135 	if (!skb)
1136 		return -ENOMEM;
1137 
1138 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1139 	cmd->reg_domain = __cpu_to_le32(rd);
1140 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
1141 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
1142 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
1143 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
1144 
1145 	ath10k_dbg(ATH10K_DBG_WMI,
1146 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
1147 		   rd, rd2g, rd5g, ctl2g, ctl5g);
1148 
1149 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1150 }
1151 
1152 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
1153 				const struct wmi_channel_arg *arg)
1154 {
1155 	struct wmi_set_channel_cmd *cmd;
1156 	struct sk_buff *skb;
1157 
1158 	if (arg->passive)
1159 		return -EINVAL;
1160 
1161 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1162 	if (!skb)
1163 		return -ENOMEM;
1164 
1165 	cmd = (struct wmi_set_channel_cmd *)skb->data;
1166 	cmd->chan.mhz               = __cpu_to_le32(arg->freq);
1167 	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
1168 	cmd->chan.mode              = arg->mode;
1169 	cmd->chan.min_power         = arg->min_power;
1170 	cmd->chan.max_power         = arg->max_power;
1171 	cmd->chan.reg_power         = arg->max_reg_power;
1172 	cmd->chan.reg_classid       = arg->reg_class_id;
1173 	cmd->chan.antenna_max       = arg->max_antenna_gain;
1174 
1175 	ath10k_dbg(ATH10K_DBG_WMI,
1176 		   "wmi set channel mode %d freq %d\n",
1177 		   arg->mode, arg->freq);
1178 
1179 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
1180 }
1181 
1182 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
1183 {
1184 	struct wmi_pdev_suspend_cmd *cmd;
1185 	struct sk_buff *skb;
1186 
1187 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1188 	if (!skb)
1189 		return -ENOMEM;
1190 
1191 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1192 	cmd->suspend_opt = WMI_PDEV_SUSPEND;
1193 
1194 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
1195 }
1196 
1197 int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
1198 {
1199 	struct sk_buff *skb;
1200 
1201 	skb = ath10k_wmi_alloc_skb(0);
1202 	if (skb == NULL)
1203 		return -ENOMEM;
1204 
1205 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
1206 }
1207 
1208 int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
1209 			      u32 value)
1210 {
1211 	struct wmi_pdev_set_param_cmd *cmd;
1212 	struct sk_buff *skb;
1213 
1214 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1215 	if (!skb)
1216 		return -ENOMEM;
1217 
1218 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1219 	cmd->param_id    = __cpu_to_le32(id);
1220 	cmd->param_value = __cpu_to_le32(value);
1221 
1222 	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
1223 		   id, value);
1224 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
1225 }
1226 
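/*
 * The TARGET_* values below are compile-time driver defaults that
 * provision firmware resources (peers, TIDs, descriptors); they are
 * pushed to the target verbatim, not negotiated.
 */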
1227 int ath10k_wmi_cmd_init(struct ath10k *ar)
1228 {
1229 	struct wmi_init_cmd *cmd;
1230 	struct sk_buff *buf;
1231 	struct wmi_resource_config config = {};
1232 	u32 val;
1233 
1234 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
1235 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
1236 	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
1237 
1238 	config.num_offload_reorder_bufs =
1239 		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
1240 
1241 	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
1242 	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
1243 	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
1244 	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
1245 	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
1246 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
1247 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
1248 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
1249 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
1250 	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
1251 
1252 	config.scan_max_pending_reqs =
1253 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
1254 
1255 	config.bmiss_offload_max_vdev =
1256 		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
1257 
1258 	config.roam_offload_max_vdev =
1259 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
1260 
1261 	config.roam_offload_max_ap_profiles =
1262 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
1263 
1264 	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
1265 	config.num_mcast_table_elems =
1266 		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
1267 
1268 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
1269 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
1270 	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
1271 	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
1272 	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
1273 
1274 	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
1275 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
1276 
1277 	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
1278 
1279 	config.gtk_offload_max_vdev =
1280 		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
1281 
1282 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
1283 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
1284 
1285 	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
1286 	if (!buf)
1287 		return -ENOMEM;
1288 
1289 	cmd = (struct wmi_init_cmd *)buf->data;
1290 	cmd->num_host_mem_chunks = 0;
1291 	memcpy(&cmd->resource_config, &config, sizeof(config));
1292 
1293 	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
1294 	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
1295 }
1296 
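/*
 * Optional scan parameters are appended to wmi_start_scan_cmd as TLVs.
 * The length computed here must match the offset bookkeeping in
 * ath10k_wmi_start_scan() exactly; the send path double-checks and
 * returns -EINVAL on any mismatch.
 */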
1297 static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
1298 {
1299 	int len;
1300 
1301 	len = sizeof(struct wmi_start_scan_cmd);
1302 
1303 	if (arg->ie_len) {
1304 		if (!arg->ie)
1305 			return -EINVAL;
1306 		if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
1307 			return -EINVAL;
1308 
1309 		len += sizeof(struct wmi_ie_data);
1310 		len += roundup(arg->ie_len, 4);
1311 	}
1312 
1313 	if (arg->n_channels) {
1314 		if (!arg->channels)
1315 			return -EINVAL;
1316 		if (arg->n_channels > ARRAY_SIZE(arg->channels))
1317 			return -EINVAL;
1318 
1319 		len += sizeof(struct wmi_chan_list);
1320 		len += sizeof(__le32) * arg->n_channels;
1321 	}
1322 
1323 	if (arg->n_ssids) {
1324 		if (!arg->ssids)
1325 			return -EINVAL;
1326 		if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
1327 			return -EINVAL;
1328 
1329 		len += sizeof(struct wmi_ssid_list);
1330 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
1331 	}
1332 
1333 	if (arg->n_bssids) {
1334 		if (!arg->bssids)
1335 			return -EINVAL;
1336 		if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
1337 			return -EINVAL;
1338 
1339 		len += sizeof(struct wmi_bssid_list);
1340 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
1341 	}
1342 
1343 	return len;
1344 }
1345 
1346 int ath10k_wmi_start_scan(struct ath10k *ar,
1347 			  const struct wmi_start_scan_arg *arg)
1348 {
1349 	struct wmi_start_scan_cmd *cmd;
1350 	struct sk_buff *skb;
1351 	struct wmi_ie_data *ie;
1352 	struct wmi_chan_list *channels;
1353 	struct wmi_ssid_list *ssids;
1354 	struct wmi_bssid_list *bssids;
1355 	u32 scan_id;
1356 	u32 scan_req_id;
1357 	int off;
1358 	int len = 0;
1359 	int i;
1360 
1361 	len = ath10k_wmi_start_scan_calc_len(arg);
1362 	if (len < 0)
1363 		return len; /* len contains error code here */
1364 
1365 	skb = ath10k_wmi_alloc_skb(len);
1366 	if (!skb)
1367 		return -ENOMEM;
1368 
1369 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
1370 	scan_id |= arg->scan_id;
1371 
1372 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1373 	scan_req_id |= arg->scan_req_id;
1374 
1375 	cmd = (struct wmi_start_scan_cmd *)skb->data;
1376 	cmd->scan_id            = __cpu_to_le32(scan_id);
1377 	cmd->scan_req_id        = __cpu_to_le32(scan_req_id);
1378 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
1379 	cmd->scan_priority      = __cpu_to_le32(arg->scan_priority);
1380 	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
1381 	cmd->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
1382 	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
1383 	cmd->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
1384 	cmd->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
1385 	cmd->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
1386 	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
1387 	cmd->idle_time          = __cpu_to_le32(arg->idle_time);
1388 	cmd->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
1389 	cmd->probe_delay        = __cpu_to_le32(arg->probe_delay);
1390 	cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
1391 
1392 	/* TLV list starts after fields included in the struct */
1393 	off = sizeof(*cmd);
1394 
1395 	if (arg->n_channels) {
1396 		channels = (void *)skb->data + off;
1397 		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
1398 		channels->num_chan = __cpu_to_le32(arg->n_channels);
1399 
1400 		for (i = 0; i < arg->n_channels; i++)
1401 			channels->channel_list[i] =
1402 				__cpu_to_le32(arg->channels[i]);
1403 
1404 		off += sizeof(*channels);
1405 		off += sizeof(__le32) * arg->n_channels;
1406 	}
1407 
1408 	if (arg->n_ssids) {
1409 		ssids = (void *)skb->data + off;
1410 		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
1411 		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
1412 
1413 		for (i = 0; i < arg->n_ssids; i++) {
1414 			ssids->ssids[i].ssid_len =
1415 				__cpu_to_le32(arg->ssids[i].len);
1416 			memcpy(&ssids->ssids[i].ssid,
1417 			       arg->ssids[i].ssid,
1418 			       arg->ssids[i].len);
1419 		}
1420 
1421 		off += sizeof(*ssids);
1422 		off += sizeof(struct wmi_ssid) * arg->n_ssids;
1423 	}
1424 
1425 	if (arg->n_bssids) {
1426 		bssids = (void *)skb->data + off;
1427 		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
1428 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
1429 
1430 		for (i = 0; i < arg->n_bssids; i++)
1431 			memcpy(&bssids->bssid_list[i],
1432 			       arg->bssids[i].bssid,
1433 			       ETH_ALEN);
1434 
1435 		off += sizeof(*bssids);
1436 		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
1437 	}
1438 
1439 	if (arg->ie_len) {
1440 		ie = (void *)skb->data + off;
1441 		ie->tag = __cpu_to_le32(WMI_IE_TAG);
1442 		ie->ie_len = __cpu_to_le32(arg->ie_len);
1443 		memcpy(ie->ie_data, arg->ie, arg->ie_len);
1444 
1445 		off += sizeof(*ie);
1446 		off += roundup(arg->ie_len, 4);
1447 	}
1448 
1449 	if (off != skb->len) {
1450 		dev_kfree_skb(skb);
1451 		return -EINVAL;
1452 	}
1453 
1454 	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
1455 	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
1456 }
1457 
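/*
 * Fill in commonly used scan defaults (dwell/rest times are in
 * milliseconds). Callers still need to set scan_id, vdev_id and the
 * channel/SSID lists before issuing the scan.
 */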
1458 void ath10k_wmi_start_scan_init(struct ath10k *ar,
1459 				struct wmi_start_scan_arg *arg)
1460 {
1461 	/* setup commonly used values */
1462 	arg->scan_req_id = 1;
1463 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
1464 	arg->dwell_time_active = 50;
1465 	arg->dwell_time_passive = 150;
1466 	arg->min_rest_time = 50;
1467 	arg->max_rest_time = 500;
1468 	arg->repeat_probe_time = 0;
1469 	arg->probe_spacing_time = 0;
1470 	arg->idle_time = 0;
1471 	arg->max_scan_time = 5000;
1472 	arg->probe_delay = 5;
1473 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
1474 		| WMI_SCAN_EVENT_COMPLETED
1475 		| WMI_SCAN_EVENT_BSS_CHANNEL
1476 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
1477 		| WMI_SCAN_EVENT_DEQUEUED;
1478 	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
1479 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
1480 	arg->n_bssids = 1;
1481 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
1482 }
1483 
1484 int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
1485 {
1486 	struct wmi_stop_scan_cmd *cmd;
1487 	struct sk_buff *skb;
1488 	u32 scan_id;
1489 	u32 req_id;
1490 
1491 	if (arg->req_id > 0xFFF)
1492 		return -EINVAL;
1493 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1494 		return -EINVAL;
1495 
1496 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1497 	if (!skb)
1498 		return -ENOMEM;
1499 
1500 	scan_id = arg->u.scan_id;
1501 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1502 
1503 	req_id = arg->req_id;
1504 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1505 
1506 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
1507 	cmd->req_type    = __cpu_to_le32(arg->req_type);
1508 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
1509 	cmd->scan_id     = __cpu_to_le32(scan_id);
1510 	cmd->scan_req_id = __cpu_to_le32(req_id);
1511 
1512 	ath10k_dbg(ATH10K_DBG_WMI,
1513 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
1514 		   arg->req_id, arg->req_type, arg->u.scan_id);
1515 	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
1516 }
1517 
1518 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
1519 			   enum wmi_vdev_type type,
1520 			   enum wmi_vdev_subtype subtype,
1521 			   const u8 macaddr[ETH_ALEN])
1522 {
1523 	struct wmi_vdev_create_cmd *cmd;
1524 	struct sk_buff *skb;
1525 
1526 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1527 	if (!skb)
1528 		return -ENOMEM;
1529 
1530 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
1531 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
1532 	cmd->vdev_type    = __cpu_to_le32(type);
1533 	cmd->vdev_subtype = __cpu_to_le32(subtype);
1534 	memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
1535 
1536 	ath10k_dbg(ATH10K_DBG_WMI,
1537 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
1538 		   vdev_id, type, subtype, macaddr);
1539 
1540 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
1541 }
1542 
1543 int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
1544 {
1545 	struct wmi_vdev_delete_cmd *cmd;
1546 	struct sk_buff *skb;
1547 
1548 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1549 	if (!skb)
1550 		return -ENOMEM;
1551 
1552 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
1553 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1554 
1555 	ath10k_dbg(ATH10K_DBG_WMI,
1556 		   "WMI vdev delete id %d\n", vdev_id);
1557 
1558 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
1559 }
1560 
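/*
 * Start and restart requests share the same payload; only the command id
 * differs, so both are funneled through this helper.
 */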
1561 static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
1562 				const struct wmi_vdev_start_request_arg *arg,
1563 				enum wmi_cmd_id cmd_id)
1564 {
1565 	struct wmi_vdev_start_request_cmd *cmd;
1566 	struct sk_buff *skb;
1567 	const char *cmdname;
1568 	u32 flags = 0;
1569 
1570 	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
1571 	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
1572 		return -EINVAL;
1573 	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
1574 		return -EINVAL;
1575 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
1576 		return -EINVAL;
1577 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1578 		return -EINVAL;
1579 
1580 	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
1581 		cmdname = "start";
1582 	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
1583 		cmdname = "restart";
1584 	else
1585 		return -EINVAL; /* should not happen; cmd_id was validated above */
1586 
1587 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1588 	if (!skb)
1589 		return -ENOMEM;
1590 
1591 	if (arg->hidden_ssid)
1592 		flags |= WMI_VDEV_START_HIDDEN_SSID;
1593 	if (arg->pmf_enabled)
1594 		flags |= WMI_VDEV_START_PMF_ENABLED;
1595 
1596 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1597 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
1598 	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
1599 	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
1600 	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
1601 	cmd->flags           = __cpu_to_le32(flags);
1602 	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
1603 	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
1604 
1605 	if (arg->ssid) {
1606 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
1607 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1608 	}
1609 
1610 	cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
1611 
1612 	cmd->chan.band_center_freq1 =
1613 		__cpu_to_le32(arg->channel.band_center_freq1);
1614 
1615 	cmd->chan.mode = arg->channel.mode;
1616 	cmd->chan.min_power = arg->channel.min_power;
1617 	cmd->chan.max_power = arg->channel.max_power;
1618 	cmd->chan.reg_power = arg->channel.max_reg_power;
1619 	cmd->chan.reg_classid = arg->channel.reg_class_id;
1620 	cmd->chan.antenna_max = arg->channel.max_antenna_gain;
1621 
1622 	ath10k_dbg(ATH10K_DBG_WMI,
1623 		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X, "
1624 		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
1625 		   arg->channel.mode, flags, arg->channel.max_power);
1626 
1627 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1628 }
1629 
1630 int ath10k_wmi_vdev_start(struct ath10k *ar,
1631 			  const struct wmi_vdev_start_request_arg *arg)
1632 {
1633 	return ath10k_wmi_vdev_start_restart(ar, arg,
1634 					     WMI_VDEV_START_REQUEST_CMDID);
1635 }
1636 
1637 int ath10k_wmi_vdev_restart(struct ath10k *ar,
1638 		     const struct wmi_vdev_start_request_arg *arg)
1639 {
1640 	return ath10k_wmi_vdev_start_restart(ar, arg,
1641 					     WMI_VDEV_RESTART_REQUEST_CMDID);
1642 }
1643 
1644 int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
1645 {
1646 	struct wmi_vdev_stop_cmd *cmd;
1647 	struct sk_buff *skb;
1648 
1649 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1650 	if (!skb)
1651 		return -ENOMEM;
1652 
1653 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
1654 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1655 
1656 	ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
1657 
1658 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
1659 }
1660 
1661 int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1662 {
1663 	struct wmi_vdev_up_cmd *cmd;
1664 	struct sk_buff *skb;
1665 
1666 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1667 	if (!skb)
1668 		return -ENOMEM;
1669 
1670 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1671 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
1672 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
1673 	memcpy(cmd->vdev_bssid.addr, bssid, ETH_ALEN);
1674 
1675 	ath10k_dbg(ATH10K_DBG_WMI,
1676 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1677 		   vdev_id, aid, bssid);
1678 
1679 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
1680 }
1681 
1682 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
1683 {
1684 	struct wmi_vdev_down_cmd *cmd;
1685 	struct sk_buff *skb;
1686 
1687 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1688 	if (!skb)
1689 		return -ENOMEM;
1690 
1691 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
1692 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1693 
1694 	ath10k_dbg(ATH10K_DBG_WMI,
1695 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
1696 
1697 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
1698 }
1699 
1700 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1701 			      enum wmi_vdev_param param_id, u32 param_value)
1702 {
1703 	struct wmi_vdev_set_param_cmd *cmd;
1704 	struct sk_buff *skb;
1705 
1706 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1707 	if (!skb)
1708 		return -ENOMEM;
1709 
1710 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1711 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
1712 	cmd->param_id    = __cpu_to_le32(param_id);
1713 	cmd->param_value = __cpu_to_le32(param_value);
1714 
1715 	ath10k_dbg(ATH10K_DBG_WMI,
1716 		   "wmi vdev id 0x%x set param %d value %d\n",
1717 		   vdev_id, param_id, param_value);
1718 
1719 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
1720 }
1721 
1722 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
1723 				const struct wmi_vdev_install_key_arg *arg)
1724 {
1725 	struct wmi_vdev_install_key_cmd *cmd;
1726 	struct sk_buff *skb;
1727 
1728 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
1729 		return -EINVAL;
1730 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
1731 		return -EINVAL;
1732 
1733 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
1734 	if (!skb)
1735 		return -ENOMEM;
1736 
1737 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1738 	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
1739 	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
1740 	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
1741 	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
1742 	cmd->key_len       = __cpu_to_le32(arg->key_len);
1743 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
1744 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
1745 
1746 	if (arg->macaddr)
1747 		memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
1748 	if (arg->key_data)
1749 		memcpy(cmd->key_data, arg->key_data, arg->key_len);
1750 
1751 	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1752 }
1753 
1754 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
1755 			   const u8 peer_addr[ETH_ALEN])
1756 {
1757 	struct wmi_peer_create_cmd *cmd;
1758 	struct sk_buff *skb;
1759 
1760 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1761 	if (!skb)
1762 		return -ENOMEM;
1763 
1764 	cmd = (struct wmi_peer_create_cmd *)skb->data;
1765 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1766 	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1767 
1768 	ath10k_dbg(ATH10K_DBG_WMI,
1769 		   "wmi peer create vdev_id %d peer_addr %pM\n",
1770 		   vdev_id, peer_addr);
1771 	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
1772 }
1773 
1774 int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
1775 			   const u8 peer_addr[ETH_ALEN])
1776 {
1777 	struct wmi_peer_delete_cmd *cmd;
1778 	struct sk_buff *skb;
1779 
1780 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1781 	if (!skb)
1782 		return -ENOMEM;
1783 
1784 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
1785 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1786 	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1787 
1788 	ath10k_dbg(ATH10K_DBG_WMI,
1789 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
1790 		   vdev_id, peer_addr);
1791 	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
1792 }
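
/*
 * Illustrative sketch: a peer entry is created before association and must
 * be deleted on the same vdev it was created on; both helpers return 0 or
 * a negative errno from the WMI send path.
 */
static int __maybe_unused ath10k_wmi_example_peer_cycle(struct ath10k *ar,
							u32 vdev_id,
							const u8 addr[ETH_ALEN])
{
	int ret;

	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
	if (ret)
		return ret;

	/* ... associate and use the peer here ... */

	return ath10k_wmi_peer_delete(ar, vdev_id, addr);
}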
1793 
1794 int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
1795 			  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
1796 {
1797 	struct wmi_peer_flush_tids_cmd *cmd;
1798 	struct sk_buff *skb;
1799 
1800 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1801 	if (!skb)
1802 		return -ENOMEM;
1803 
1804 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1805 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
1806 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
1807 	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1808 
1809 	ath10k_dbg(ATH10K_DBG_WMI,
1810 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
1811 		   vdev_id, peer_addr, tid_bitmap);
1812 	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1813 }
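
/*
 * Illustrative sketch: bit N of the tid bitmap selects TID N, so flushing
 * TIDs 0 and 4 would pass 0x11; the single-TID mask below is an example.
 */
static int __maybe_unused ath10k_wmi_example_flush_tid0(struct ath10k *ar,
							u32 vdev_id,
							const u8 *addr)
{
	return ath10k_wmi_peer_flush(ar, vdev_id, addr, BIT(0));
}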
1814 
1815 int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
1816 			      const u8 *peer_addr, enum wmi_peer_param param_id,
1817 			      u32 param_value)
1818 {
1819 	struct wmi_peer_set_param_cmd *cmd;
1820 	struct sk_buff *skb;
1821 
1822 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1823 	if (!skb)
1824 		return -ENOMEM;
1825 
1826 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1827 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
1828 	cmd->param_id    = __cpu_to_le32(param_id);
1829 	cmd->param_value = __cpu_to_le32(param_value);
1830 	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
1831 
1832 	ath10k_dbg(ATH10K_DBG_WMI,
1833 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
1834 		   vdev_id, peer_addr, param_id, param_value);
1835 
1836 	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
1837 }
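
/*
 * Illustrative sketch: per-peer knobs share this one command, e.g. marking
 * a peer authorized once the 4-way handshake completes. WMI_PEER_AUTHORIZE
 * is assumed from enum wmi_peer_param in wmi.h as an example id.
 */
static int __maybe_unused ath10k_wmi_example_authorize(struct ath10k *ar,
						       u32 vdev_id,
						       const u8 *addr)
{
	return ath10k_wmi_peer_set_param(ar, vdev_id, addr,
					 WMI_PEER_AUTHORIZE, 1);
}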
1838 
1839 int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
1840 			  enum wmi_sta_ps_mode psmode)
1841 {
1842 	struct wmi_sta_powersave_mode_cmd *cmd;
1843 	struct sk_buff *skb;
1844 
1845 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1846 	if (!skb)
1847 		return -ENOMEM;
1848 
1849 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
1850 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
1851 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
1852 
1853 	ath10k_dbg(ATH10K_DBG_WMI,
1854 		   "wmi set powersave id 0x%x mode %d\n",
1855 		   vdev_id, psmode);
1856 
1857 	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1858 }
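
/*
 * Illustrative sketch: power save is a per-vdev switch; the mode value is
 * assumed to come from enum wmi_sta_ps_mode in wmi.h.
 */
static int __maybe_unused ath10k_wmi_example_enable_ps(struct ath10k *ar,
						       u32 vdev_id)
{
	return ath10k_wmi_set_psmode(ar, vdev_id, WMI_STA_PS_MODE_ENABLED);
}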
1859 
1860 int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
1861 				enum wmi_sta_powersave_param param_id,
1862 				u32 value)
1863 {
1864 	struct wmi_sta_powersave_param_cmd *cmd;
1865 	struct sk_buff *skb;
1866 
1867 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1868 	if (!skb)
1869 		return -ENOMEM;
1870 
1871 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1872 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
1873 	cmd->param_id    = __cpu_to_le32(param_id);
1874 	cmd->param_value = __cpu_to_le32(value);
1875 
1876 	ath10k_dbg(ATH10K_DBG_WMI,
1877 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
1878 		   vdev_id, param_id, value);
1879 	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1880 }
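
/*
 * Illustrative sketch: individual power-save tunables are set one at a
 * time; WMI_STA_PS_PARAM_RX_WAKE_POLICY and the value 1 are assumed
 * example inputs from enum wmi_sta_powersave_param in wmi.h.
 */
static int __maybe_unused ath10k_wmi_example_wake_policy(struct ath10k *ar,
							 u32 vdev_id)
{
	return ath10k_wmi_set_sta_ps_param(ar, vdev_id,
					   WMI_STA_PS_PARAM_RX_WAKE_POLICY, 1);
}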
1881 
1882 int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1883 			       enum wmi_ap_ps_peer_param param_id, u32 value)
1884 {
1885 	struct wmi_ap_ps_peer_cmd *cmd;
1886 	struct sk_buff *skb;
1887 
1888 	if (!mac)
1889 		return -EINVAL;
1890 
1891 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1892 	if (!skb)
1893 		return -ENOMEM;
1894 
1895 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1896 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1897 	cmd->param_id = __cpu_to_le32(param_id);
1898 	cmd->param_value = __cpu_to_le32(value);
1899 	memcpy(cmd->peer_macaddr.addr, mac, ETH_ALEN);
1900 
1901 	ath10k_dbg(ATH10K_DBG_WMI,
1902 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
1903 		   vdev_id, param_id, value, mac);
1904 
1905 	return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1906 }
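
/*
 * Illustrative sketch: AP-side power-save parameters are per peer, hence
 * the mandatory MAC check above. WMI_AP_PS_PEER_PARAM_UAPSD is assumed
 * from enum wmi_ap_ps_peer_param in wmi.h as an example id.
 */
static int __maybe_unused ath10k_wmi_example_uapsd(struct ath10k *ar,
						   u32 vdev_id, const u8 *mac,
						   u32 enable)
{
	return ath10k_wmi_set_ap_ps_param(ar, vdev_id, mac,
					  WMI_AP_PS_PEER_PARAM_UAPSD, enable);
}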
1907 
1908 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
1909 			      const struct wmi_scan_chan_list_arg *arg)
1910 {
1911 	struct wmi_scan_chan_list_cmd *cmd;
1912 	struct sk_buff *skb;
1913 	struct wmi_channel_arg *ch;
1914 	struct wmi_channel *ci;
1915 	int len;
1916 	int i;
1917 
1918 	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
1919 
1920 	skb = ath10k_wmi_alloc_skb(len);
1921 	if (!skb)
1922 		return -ENOMEM;
1923 
1924 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
1925 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
1926 
1927 	for (i = 0; i < arg->n_channels; i++) {
1928 		u32 flags = 0;
1929 
1930 		ch = &arg->channels[i];
1931 		ci = &cmd->chan_info[i];
1932 
1933 		if (ch->passive)
1934 			flags |= WMI_CHAN_FLAG_PASSIVE;
1935 		if (ch->allow_ibss)
1936 			flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1937 		if (ch->allow_ht)
1938 			flags |= WMI_CHAN_FLAG_ALLOW_HT;
1939 		if (ch->allow_vht)
1940 			flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1941 		if (ch->ht40plus)
1942 			flags |= WMI_CHAN_FLAG_HT40_PLUS;
1943 
1944 		ci->mhz               = __cpu_to_le32(ch->freq);
1945 		ci->band_center_freq1 = __cpu_to_le32(ch->freq);
1946 		ci->band_center_freq2 = 0;
1947 		ci->min_power         = ch->min_power;
1948 		ci->max_power         = ch->max_power;
1949 		ci->reg_power         = ch->max_reg_power;
1950 		ci->antenna_max       = ch->max_antenna_gain;
1952 
1953 		/* mode & flags share storage */
1954 		ci->mode              = ch->mode;
1955 		ci->flags            |= __cpu_to_le32(flags);
1956 	}
1957 
1958 	return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
1959 }
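
/*
 * Illustrative sketch: the caller owns the channel array the arg points
 * at; a single active 2.4 GHz channel could be described as below. All
 * field values (and MODE_11G from enum wmi_phy_mode) are examples only.
 */
static int __maybe_unused ath10k_wmi_example_chan_list(struct ath10k *ar)
{
	struct wmi_channel_arg ch = {
		.freq          = 2412, /* MHz, channel 1 */
		.mode          = MODE_11G,
		.allow_ht      = true,
		.max_power     = 30,
		.max_reg_power = 30,
	};
	struct wmi_scan_chan_list_arg arg = {
		.n_channels = 1,
		.channels   = &ch,
	};

	return ath10k_wmi_scan_chan_list(ar, &arg);
}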
1960 
1961 int ath10k_wmi_peer_assoc(struct ath10k *ar,
1962 			  const struct wmi_peer_assoc_complete_arg *arg)
1963 {
1964 	struct wmi_peer_assoc_complete_cmd *cmd;
1965 	struct sk_buff *skb;
1966 
1967 	if (arg->peer_mpdu_density > 16)
1968 		return -EINVAL;
1969 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
1970 		return -EINVAL;
1971 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
1972 		return -EINVAL;
1973 
1974 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1975 	if (!skb)
1976 		return -ENOMEM;
1977 
1978 	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
1979 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
1980 	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
1981 	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
1982 	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
1983 	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
1984 	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
1985 	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
1986 	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
1987 	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
1988 	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
1989 	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
1990 	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
1991 	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
1992 
1993 	memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
1994 
1995 	cmd->peer_legacy_rates.num_rates =
1996 		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
1997 	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
1998 	       arg->peer_legacy_rates.num_rates);
1999 
2000 	cmd->peer_ht_rates.num_rates =
2001 		__cpu_to_le32(arg->peer_ht_rates.num_rates);
2002 	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
2003 	       arg->peer_ht_rates.num_rates);
2004 
2005 	cmd->peer_vht_rates.rx_max_rate =
2006 		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2007 	cmd->peer_vht_rates.rx_mcs_set =
2008 		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2009 	cmd->peer_vht_rates.tx_max_rate =
2010 		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2011 	cmd->peer_vht_rates.tx_mcs_set =
2012 		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2013 
2014 	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
2015 }
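
/*
 * Illustrative sketch: only a few representative fields are filled; real
 * callers derive everything from the station's capabilities. The rate
 * arrays are bounded by MAX_SUPPORTED_RATES, which the checks above
 * enforce. MODE_11G and the numeric values are examples only.
 */
static int __maybe_unused ath10k_wmi_example_assoc(struct ath10k *ar,
						   u32 vdev_id,
						   const u8 *addr, u32 aid)
{
	struct wmi_peer_assoc_complete_arg arg = {
		.vdev_id                  = vdev_id,
		.peer_aid                 = aid,
		.peer_phymode             = MODE_11G,
		.peer_num_spatial_streams = 1,
		.peer_listen_intval       = 1,
	};

	memcpy(arg.addr, addr, ETH_ALEN);

	return ath10k_wmi_peer_assoc(ar, &arg);
}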
2016 
2017 int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
2018 {
2019 	struct wmi_bcn_tx_cmd *cmd;
2020 	struct sk_buff *skb;
2021 
2022 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
2023 	if (!skb)
2024 		return -ENOMEM;
2025 
2026 	cmd = (struct wmi_bcn_tx_cmd *)skb->data;
2027 	cmd->hdr.vdev_id  = __cpu_to_le32(arg->vdev_id);
2028 	cmd->hdr.tx_rate  = __cpu_to_le32(arg->tx_rate);
2029 	cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
2030 	cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
2031 	memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
2032 
2033 	return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
2034 }
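
/*
 * Illustrative sketch: the template is copied into the command tail, which
 * is why the skb above is sized sizeof(*cmd) + bcn_len; the caller just
 * points bcn at a complete 802.11 beacon frame.
 */
static int __maybe_unused ath10k_wmi_example_bcn(struct ath10k *ar,
						 u32 vdev_id,
						 const void *bcn, u32 bcn_len)
{
	struct wmi_bcn_tx_arg arg = {
		.vdev_id = vdev_id,
		.bcn_len = bcn_len,
		.bcn     = bcn,
	};

	return ath10k_wmi_beacon_send(ar, &arg);
}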
2035 
2036 static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
2037 					  const struct wmi_wmm_params_arg *arg)
2038 {
2039 	params->cwmin  = __cpu_to_le32(arg->cwmin);
2040 	params->cwmax  = __cpu_to_le32(arg->cwmax);
2041 	params->aifs   = __cpu_to_le32(arg->aifs);
2042 	params->txop   = __cpu_to_le32(arg->txop);
2043 	params->acm    = __cpu_to_le32(arg->acm);
2044 	params->no_ack = __cpu_to_le32(arg->no_ack);
2045 }
2046 
2047 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
2048 			const struct wmi_pdev_set_wmm_params_arg *arg)
2049 {
2050 	struct wmi_pdev_set_wmm_params *cmd;
2051 	struct sk_buff *skb;
2052 
2053 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2054 	if (!skb)
2055 		return -ENOMEM;
2056 
2057 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
2058 	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
2059 	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
2060 	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
2061 	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
2062 
2063 	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
2064 	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
2065 }
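
/*
 * Illustrative sketch: all four access categories travel in one command;
 * the best-effort numbers below mirror the common 802.11 defaults (AIFS 3,
 * CWmin 15, CWmax 1023) and are examples only.
 */
static int __maybe_unused ath10k_wmi_example_wmm(struct ath10k *ar)
{
	struct wmi_pdev_set_wmm_params_arg arg = {};

	arg.ac_be.aifs  = 3;
	arg.ac_be.cwmin = 15;
	arg.ac_be.cwmax = 1023;
	/* ac_bk, ac_vi and ac_vo would be filled the same way */

	return ath10k_wmi_pdev_set_wmm_params(ar, &arg);
}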
2066 
2067 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
2068 {
2069 	struct wmi_request_stats_cmd *cmd;
2070 	struct sk_buff *skb;
2071 
2072 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2073 	if (!skb)
2074 		return -ENOMEM;
2075 
2076 	cmd = (struct wmi_request_stats_cmd *)skb->data;
2077 	cmd->stats_id = __cpu_to_le32(stats_id);
2078 
2079 	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
2080 	return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
2081 }
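
/*
 * Illustrative sketch: the firmware answers asynchronously with a stats
 * update event, so this only kicks off the request. WMI_REQUEST_PEER_STAT
 * is assumed from enum wmi_stats_id in wmi.h as an example id.
 */
static int __maybe_unused ath10k_wmi_example_peer_stats(struct ath10k *ar)
{
	return ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
}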
2082