// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "mac.h"

#include <net/mac80211.h>
#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-ops.h"

static const struct wiphy_wowlan_support ath10k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
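
/* Disable all WoW wakeup events and remove any previously installed wake
 * patterns on the given vdev.
 */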
static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
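
/* Clear WoW wakeup events and wake patterns on every active vdev. */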
static int ath10k_wow_cleanup(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_wow_vif_cleanup(arvif);
		if (ret) {
			ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Convert an 802.3 format to an 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+---------------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+---------------+-----------+
 */
static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					     const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};

	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logical link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Calculate new pkt_offset */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Calculate new hdr end offset */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}
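
/* Translate a cfg80211 net-detect (scheduled scan) request into a WMI PNO
 * scan request, rejecting requests that exceed the firmware limits.
 */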
static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
				struct cfg80211_sched_scan_request *nd_config,
				struct wmi_pno_scan_req *pno)
{
	int i, j, ret = 0;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copy list of valid channels into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
				nd_config->ssids[i].ssid_len &&
			(memcmp(pno->a_networks[j].ssid.ssid,
				nd_config->ssids[i].ssid,
				__le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath10k_warn(ar, "Invalid number of scan plans %d !!",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
	return ret;
}
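
/* Program the wakeup events and wake patterns requested for a single vdev.
 * When the firmware delivers frames in native-wifi decap mode, 802.3 style
 * patterns are converted to the 802.11 layout first.
 */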
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath10k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Setup requested WOW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
						   wowlan->nd_config, pno);
			if (!ret) {
				ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};
		struct cfg80211_pkt_pattern old_pattern = patterns[i];
		int j;

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;
		/* convert bytemask to bitmask */
		for (j = 0; j < patterns[i].pattern_len; j++)
			if (patterns[i].mask[j / 8] & BIT(j % 8))
				bitmask[j] = 0xff;
		old_pattern.mask = bitmask;

		if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN) {
				ath10k_wow_convert_8023_to_80211(&new_pattern,
								 &old_pattern);
			} else {
				new_pattern = old_pattern;
				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
			}
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
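
/* Apply the requested WoW wakeup configuration to every active vdev. */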
static int ath10k_wow_set_wakeups(struct ath10k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath10k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
		if (ret) {
			ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
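
/* Disable PNO (network list offload) on a station vdev if it was enabled
 * for net-detect during suspend.
 */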
static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
{
	int ret = 0;
	struct ath10k *ar = arvif->ar;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (ar->nlo_enabled) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			pno->enable = 0;
			ar->nlo_enabled = false;
			ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
			kfree(pno);
		}
		break;
	default:
		break;
	}
	return ret;
}
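
/* Disable PNO on all active vdevs after waking up from WoW. */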
static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_vif_wow_clean_nlo(arvif);
		if (ret) {
			ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
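
/* Tell the firmware to enter WoW suspend and wait up to three seconds for
 * the target suspend completion.
 */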
static int ath10k_wow_enable(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->target_suspend);

	ret = ath10k_wmi_wow_enable(ar);
	if (ret) {
		ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
	if (ret == 0) {
		ath10k_warn(ar, "timed out while waiting for suspend completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}
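
/* Send a host wakeup indication to the firmware and wait up to three
 * seconds for the WoW wakeup completion.
 */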
static int ath10k_wow_wakeup(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->wow.wakeup_completed);

	ret = ath10k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}
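
/* mac80211 suspend handler: clear stale WoW state, program the requested
 * wakeups, drain TX, enable WoW in the firmware and suspend the HIF layer.
 * Any failure is reported back to mac80211 as 1, which makes it fall back
 * to a normal (non-WoWLAN) suspend.
 */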
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	ret = ath10k_wow_cleanup(ar);
	if (ret) {
		ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath10k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ath10k_mac_wait_tx_complete(ar);

	ret = ath10k_wow_enable(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath10k_hif_suspend(ar);
	if (ret) {
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath10k_wow_wakeup(ar);

cleanup:
	ath10k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}
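
/* mac80211 set_wakeup handler: propagate the wakeup enable flag to the
 * device when the running firmware supports WoWLAN.
 */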
void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath10k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
		     ar->running_fw->fw_file.fw_features)) {
		device_set_wakeup_enable(ar->dev, enabled);
	}
	mutex_unlock(&ar->conf_mutex);
}
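
/* mac80211 resume handler: resume the HIF layer, wake the firmware out of
 * WoW and disable any net-detect scan that was left enabled. On failure the
 * device is either marked for restart or reported as unrecoverable.
 */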
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	ret = ath10k_hif_resume(ar);
	if (ret) {
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ret = ath10k_wow_wakeup(ar);
	if (ret)
		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);

	ret = ath10k_wow_nlo_cleanup(ar);
	if (ret)
		ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);

exit:
	if (ret) {
		switch (ar->state) {
		case ATH10K_STATE_ON:
			ar->state = ATH10K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH10K_STATE_OFF:
		case ATH10K_STATE_RESTARTING:
		case ATH10K_STATE_RESTARTED:
		case ATH10K_STATE_UTF:
		case ATH10K_STATE_WEDGED:
			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
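
/* Advertise WoWLAN capabilities to mac80211 based on the running firmware's
 * features and WMI services.
 */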
int ath10k_wow_init(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
		return -EINVAL;

	ar->wow.wowlan_support = ath10k_wowlan_support;

	if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->dev, true);

	return 0;
}
643