// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"

static const struct wiphy_wowlan_support ath11k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

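/* Ask the firmware to enter WoW: issue a WMI WoW enable command and wait
 * (up to 3 seconds per attempt, ATH11K_WOW_RETRY_NUM attempts in total)
 * for the HTC suspend-complete indication from the target.
 */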
int ath11k_wow_enable(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int i, ret;

	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath11k_wmi_wow_enable(ar);
		if (ret) {
			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath11k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH11K_WOW_RETRY_WAIT_MS);
	}

	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

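/* Wake the firmware up from WoW. On targets that use SMP2P for the
 * power-save exit message (e.g. WCN6750) nothing is sent over WMI;
 * otherwise a host wakeup indication is sent and the driver waits for
 * the wakeup-completed event.
 */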
int ath11k_wow_wakeup(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int ret;

	/* In the case of WCN6750, WoW wakeup is done
	 * by sending SMP2P power save exit message
	 * to the target processor.
	 */
	if (ab->hw_params.smp2p_wow_exit)
		return 0;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

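/* Disable every WoW wakeup event and delete every installed pattern on
 * the given vdev so that a fresh configuration can be programmed.
 */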
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
	struct ath11k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

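/* Run the per-vdev WoW cleanup on every interface of this radio. */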
static int ath11k_wow_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wow_vif_cleanup(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert an 802.3 format to an 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+------+--------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+------+--------+-----------+
 */
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					     const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};

	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy the logical link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Compute new pkt_offset */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Compute new hdr end offset */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}

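/* Validate the cfg80211 scheduled scan (net-detect) request and convert
 * it into a WMI PNO scan request: match sets, channel list, scan plan
 * periods, optional MAC randomization and dwell times.
 */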
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
					    struct cfg80211_sched_scan_request *nd_config,
					    struct wmi_pno_scan_req *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Fill in the per-profile parameters */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copy the list of valid channels into the request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (pno->a_networks[j].ssid.ssid_len ==
				nd_config->ssids[i].ssid_len &&
			(memcmp(pno->a_networks[j].ssid.ssid,
				nd_config->ssids[i].ssid,
				pno->a_networks[j].ssid.ssid_len) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath11k_warn(ar->ab, "invalid number of scan plans %d\n",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}

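/* Program the WoW configuration for one vdev: choose wakeup events based
 * on the vdev type and the user's wowlan settings, optionally configure
 * PNO (net-detect), and install the requested packet patterns (converted
 * to 802.11 layout when the firmware uses native wifi RX decap).
 */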
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath11k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Set up the requested WoW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};
		struct cfg80211_pkt_pattern old_pattern = patterns[i];
		int j;

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;
		/* convert bytemask to bitmask */
		for (j = 0; j < patterns[i].pattern_len; j++)
			if (patterns[i].mask[j / 8] & BIT(j % 8))
				bitmask[j] = 0xff;
		old_pattern.mask = bitmask;

		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
		    ATH11K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN) {
				u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};

				memcpy(pattern_ext, old_pattern.pattern,
				       old_pattern.pattern_len);
				old_pattern.pattern = pattern_ext;
				ath11k_wow_convert_8023_to_80211(&new_pattern,
								 &old_pattern);
			} else {
				new_pattern = old_pattern;
				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
			}
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_set_wakeups(struct ath11k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

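/* Disable PNO (net-detect) on a station vdev if it was enabled for WoW. */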
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
	int ret = 0;
	struct ath11k *ar = arvif->ar;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (ar->nlo_enabled) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			pno->enable = 0;
			ar->nlo_enabled = false;
			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
			kfree(pno);
		}
		break;
	default:
		break;
	}
	return ret;
}

static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_clean_nlo(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

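/* Enable hardware data filtering on every vdev so that, while in WoW,
 * non-ICMPv6 multicast and non-ARP broadcast frames are dropped by the
 * target.
 */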
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	u32 bitmap;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
						    bitmap,
						    true);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

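/* Remove the hardware data filter from every vdev on resume. */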
static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);

		if (ret) {
			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

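/* Enable or disable ARP and NS (IPv6 neighbour solicitation) offload in
 * the firmware for every station vdev, so these requests can be answered
 * without waking the host.
 */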
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

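/* Enable or disable GTK rekey offload for associated station vdevs that
 * have rekey data configured. When disabling (on resume), fetch the
 * latest rekey info from the firmware first.
 */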
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disabling rekey offload */
		if (!enable) {
			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
	int ret;

	ret = ath11k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath11k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

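/* Configure the station keepalive method and interval on every vdev. */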
static int ath11k_wow_set_keepalive(struct ath11k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}

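/* mac80211 suspend handler: drain TX, stop pktlog, wipe any stale WoW
 * state, program wakeup events, protocol offloads, HW filters and
 * keepalive, enable WoW in the firmware and finally suspend the HIF.
 * On any failure the WoW configuration is cleaned up (and the target
 * woken up again if needed) and 1 is returned to mac80211.
 */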
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath11k *ar = hw->priv;
	int ret;

	ret = ath11k_mac_wait_tx_complete(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		return ret;
	}

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_protocol_offload(ar, true);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_wow_enable(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx pktlog during wow suspend: %d\n",
			    ret);
		goto cleanup;
	}

	ath11k_ce_stop_shadow_timers(ar->ab);
	ath11k_dp_stop_shadow_timers(ar->ab);

	ath11k_hif_irq_disable(ar->ab);
	ath11k_hif_ce_irq_disable(ar->ab);

	ret = ath11k_hif_suspend(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath11k_wow_wakeup(ar->ab);

cleanup:
	ath11k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}

void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath11k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	device_set_wakeup_enable(ar->ab->dev, enabled);
	mutex_unlock(&ar->conf_mutex);
}

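/* mac80211 resume handler: resume the HIF, re-enable interrupts and
 * pktlog, wake the firmware out of WoW and undo the suspend-time
 * configuration (PNO, HW filters, protocol offloads, keepalive). If
 * anything fails while the device was up, the state is moved to
 * ATH11K_STATE_RESTARTING and 1 is returned so that mac80211
 * reconfigures the device.
 */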
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_hif_resume(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath11k_hif_ce_irq_enable(ar->ab);
	ath11k_hif_irq_enable(ar->ab);

	ret = ath11k_dp_rx_pktlog_start(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_wakeup(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_nlo_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_clear_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_protocol_offload(ar, false);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ar->state) {
		case ATH11K_STATE_ON:
			ar->state = ATH11K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH11K_STATE_OFF:
		case ATH11K_STATE_RESTARTING:
		case ATH11K_STATE_RESTARTED:
		case ATH11K_STATE_WEDGED:
			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}

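/* Advertise WoWLAN support to mac80211 if the firmware reports the WoW
 * service: adjust the maximum pattern length/offset for native wifi
 * decap, enable net-detect when the NLO service is present, and mark
 * the device as wakeup capable.
 */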
int ath11k_wow_init(struct ath11k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath11k_wowlan_support;

	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
	    ATH11K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}
878