xref: /linux/drivers/net/wireless/ath/ath11k/wow.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2020 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/delay.h>
8 
9 #include "mac.h"
10 
11 #include <net/mac80211.h>
12 #include "core.h"
13 #include "hif.h"
14 #include "debug.h"
15 #include "wmi.h"
16 #include "wow.h"
17 #include "dp_rx.h"
18 
19 static const struct wiphy_wowlan_support ath11k_wowlan_support = {
20 	.flags = WIPHY_WOWLAN_DISCONNECT |
21 		 WIPHY_WOWLAN_MAGIC_PKT |
22 		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
23 		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
24 	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
25 	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
26 	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
27 };
28 
29 int ath11k_wow_enable(struct ath11k_base *ab)
30 {
31 	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
32 	int i, ret;
33 
34 	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
35 
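	/* The wait below relies on the HTC layer completing ab->htc_suspend
	 * and setting ATH11K_FLAG_HTC_SUSPEND_COMPLETE once the firmware
	 * acknowledges the suspend; if the flag is not set in time, the WMI
	 * WoW enable command is retried up to ATH11K_WOW_RETRY_NUM times.
	 */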
36 	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
37 		reinit_completion(&ab->htc_suspend);
38 
39 		ret = ath11k_wmi_wow_enable(ar);
40 		if (ret) {
41 			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
42 			return ret;
43 		}
44 
45 		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
46 		if (ret == 0) {
47 			ath11k_warn(ab,
48 				    "timed out while waiting for htc suspend completion\n");
49 			return -ETIMEDOUT;
50 		}
51 
52 		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
53 			/* success, suspend complete received */
54 			return 0;
55 
56 		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
57 			    i);
58 		msleep(ATH11K_WOW_RETRY_WAIT_MS);
59 	}
60 
61 	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
62 
63 	return -ETIMEDOUT;
64 }
65 
66 int ath11k_wow_wakeup(struct ath11k_base *ab)
67 {
68 	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
69 	int ret;
70 
71 	/* In the case of WCN6750, WoW wakeup is done
72 	 * by sending an SMP2P power save exit message
73 	 * to the target processor.
74 	 */
75 	if (ab->hw_params.smp2p_wow_exit)
76 		return 0;
77 
78 	reinit_completion(&ab->wow.wakeup_completed);
79 
80 	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
81 	if (ret) {
82 		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
83 			    ret);
84 		return ret;
85 	}
86 
87 	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
88 	if (ret == 0) {
89 		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
90 		return -ETIMEDOUT;
91 	}
92 
93 	return 0;
94 }
95 
96 static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
97 {
98 	struct ath11k *ar = arvif->ar;
99 	int i, ret;
100 
101 	for (i = 0; i < WOW_EVENT_MAX; i++) {
102 		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
103 		if (ret) {
104 			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
105 				    wow_wakeup_event(i), arvif->vdev_id, ret);
106 			return ret;
107 		}
108 	}
109 
110 	for (i = 0; i < ar->wow.max_num_patterns; i++) {
111 		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
112 		if (ret) {
113 			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
114 				    i, arvif->vdev_id, ret);
115 			return ret;
116 		}
117 	}
118 
119 	return 0;
120 }
121 
122 static int ath11k_wow_cleanup(struct ath11k *ar)
123 {
124 	struct ath11k_vif *arvif;
125 	int ret;
126 
127 	lockdep_assert_held(&ar->conf_mutex);
128 
129 	list_for_each_entry(arvif, &ar->arvifs, list) {
130 		ret = ath11k_wow_vif_cleanup(arvif);
131 		if (ret) {
132 			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
133 				    arvif->vdev_id, ret);
134 			return ret;
135 		}
136 	}
137 
138 	return 0;
139 }
140 
141 /* Convert an 802.3 format to an 802.11 format.
142  *         +------------+-----------+--------+----------------+
143  * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
144  *         +------------+-----------+--------+----------------+
145  *                |__         |_______    |____________  |________
146  *                   |                |                |          |
147  *         +--+------------+----+-----------+------+--------+-----------+
148  * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
149  *         +--+------------+----+-----------+------+--------+-----------+
150  */
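/* Worked example of the mapping below (illustrative only, assuming the
 * usual layouts of the structures involved: a 24-byte ieee80211_hdr_3addr
 * followed by an 8-byte rfc1042_hdr, so WOW_HDR_LEN is 32 and ETH_HLEN is 14):
 *   - an 802.3 pattern at pkt_offset 0 (destination MAC) lands at
 *     802.11 offset 4 (offsetof(struct ieee80211_hdr_3addr, addr1));
 *   - an 802.3 pattern at pkt_offset 6 (source MAC) lands at
 *     802.11 offset 16 (offsetof(struct ieee80211_hdr_3addr, addr3));
 *   - an 802.3 pattern at pkt_offset 12 (EtherType) lands at
 *     802.11 offset 12 + 24 + 8 - 14 = 30 (the snap_type field);
 *   - payload bytes keep their contents and shift by 32 - 14 = 18 bytes.
 */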
151 static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
152 					     const struct cfg80211_pkt_pattern *old)
153 {
154 	u8 hdr_8023_pattern[ETH_HLEN] = {};
155 	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
156 	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
157 	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
158 	u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};
159 
160 	int total_len = old->pkt_offset + old->pattern_len;
161 	int hdr_80211_end_offset;
162 
163 	struct ieee80211_hdr_3addr *new_hdr_pattern =
164 		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
165 	struct ieee80211_hdr_3addr *new_hdr_mask =
166 		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
167 	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
168 	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
169 	int hdr_len = sizeof(*new_hdr_pattern);
170 
171 	struct rfc1042_hdr *new_rfc_pattern =
172 		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
173 	struct rfc1042_hdr *new_rfc_mask =
174 		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
175 	int rfc_len = sizeof(*new_rfc_pattern);
176 	int i;
177 
178 	/* convert bitmask to bytemask */
179 	for (i = 0; i < old->pattern_len; i++)
180 		if (old->mask[i / 8] & BIT(i % 8))
181 			bytemask[i] = 0xff;
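	/* For instance (illustrative only): a cfg80211 mask byte of 0x05 for a
	 * three-byte pattern expands to bytemask[] = { 0xff, 0x00, 0xff }.
	 */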
182 
183 	memcpy(hdr_8023_pattern + old->pkt_offset,
184 	       old->pattern, ETH_HLEN - old->pkt_offset);
185 	memcpy(hdr_8023_bit_mask + old->pkt_offset,
186 	       bytemask, ETH_HLEN - old->pkt_offset);
187 
188 	/* Copy destination address */
189 	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
190 	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
191 
192 	/* Copy source address */
193 	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
194 	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
195 
196 	/* Copy the EtherType (logical link type) */
197 	memcpy(&new_rfc_pattern->snap_type,
198 	       &old_hdr_pattern->h_proto,
199 	       sizeof(old_hdr_pattern->h_proto));
200 	memcpy(&new_rfc_mask->snap_type,
201 	       &old_hdr_mask->h_proto,
202 	       sizeof(old_hdr_mask->h_proto));
203 
204 	/* Compute new pkt_offset */
205 	if (old->pkt_offset < ETH_ALEN)
206 		new->pkt_offset = old->pkt_offset +
207 			offsetof(struct ieee80211_hdr_3addr, addr1);
208 	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
209 		new->pkt_offset = old->pkt_offset +
210 			offsetof(struct ieee80211_hdr_3addr, addr3) -
211 			offsetof(struct ethhdr, h_source);
212 	else
213 		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
214 
215 	/* Compute new hdr end offset */
216 	if (total_len > ETH_HLEN)
217 		hdr_80211_end_offset = hdr_len + rfc_len;
218 	else if (total_len > offsetof(struct ethhdr, h_proto))
219 		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
220 	else if (total_len > ETH_ALEN)
221 		hdr_80211_end_offset = total_len - ETH_ALEN +
222 			offsetof(struct ieee80211_hdr_3addr, addr3);
223 	else
224 		hdr_80211_end_offset = total_len +
225 			offsetof(struct ieee80211_hdr_3addr, addr1);
226 
227 	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
228 
229 	memcpy((u8 *)new->pattern,
230 	       hdr_80211_pattern + new->pkt_offset,
231 	       new->pattern_len);
232 	memcpy((u8 *)new->mask,
233 	       hdr_80211_bit_mask + new->pkt_offset,
234 	       new->pattern_len);
235 
236 	if (total_len > ETH_HLEN) {
237 		/* Copy frame body */
238 		memcpy((u8 *)new->pattern + new->pattern_len,
239 		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
240 		       total_len - ETH_HLEN);
241 		memcpy((u8 *)new->mask + new->pattern_len,
242 		       bytemask + ETH_HLEN - old->pkt_offset,
243 		       total_len - ETH_HLEN);
244 
245 		new->pattern_len += total_len - ETH_HLEN;
246 	}
247 }
248 
249 static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
250 					    struct cfg80211_sched_scan_request *nd_config,
251 					    struct wmi_pno_scan_req *pno)
252 {
253 	int i, j;
254 	u8 ssid_len;
255 
256 	pno->enable = 1;
257 	pno->vdev_id = vdev_id;
258 	pno->uc_networks_count = nd_config->n_match_sets;
259 
260 	if (!pno->uc_networks_count ||
261 	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
262 		return -EINVAL;
263 
264 	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
265 		return -EINVAL;
266 
267 	/* Fill in the per-profile parameters */
268 	for (i = 0; i < pno->uc_networks_count; i++) {
269 		ssid_len = nd_config->match_sets[i].ssid.ssid_len;
270 
271 		if (ssid_len == 0 || ssid_len > 32)
272 			return -EINVAL;
273 
274 		pno->a_networks[i].ssid.ssid_len = ssid_len;
275 
276 		memcpy(pno->a_networks[i].ssid.ssid,
277 		       nd_config->match_sets[i].ssid.ssid,
278 		       nd_config->match_sets[i].ssid.ssid_len);
279 		pno->a_networks[i].authentication = 0;
280 		pno->a_networks[i].encryption     = 0;
281 		pno->a_networks[i].bcast_nw_type  = 0;
282 
283 		/* Copy the list of valid channels into the request */
284 		pno->a_networks[i].channel_count = nd_config->n_channels;
285 		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
286 
287 		for (j = 0; j < nd_config->n_channels; j++) {
288 			pno->a_networks[i].channels[j] =
289 					nd_config->channels[j]->center_freq;
290 		}
291 	}
292 
293 	/* set scan to passive if no SSIDs are specified in the request */
294 	if (nd_config->n_ssids == 0)
295 		pno->do_passive_scan = true;
296 	else
297 		pno->do_passive_scan = false;
298 
299 	for (i = 0; i < nd_config->n_ssids; i++) {
300 		j = 0;
301 		while (j < pno->uc_networks_count) {
302 			if (pno->a_networks[j].ssid.ssid_len ==
303 				nd_config->ssids[i].ssid_len &&
304 			(memcmp(pno->a_networks[j].ssid.ssid,
305 				nd_config->ssids[i].ssid,
306 				pno->a_networks[j].ssid.ssid_len) == 0)) {
307 				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
308 				break;
309 			}
310 			j++;
311 		}
312 	}
313 
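	/* Illustrative example of the conversion below: two scan plans of
	 * { interval = 10 s, iterations = 3 } and { interval = 60 s } become
	 * fast_scan_period = 10000 ms, fast_scan_max_cycles = 3 and
	 * slow_scan_period = 60000 ms; a single plan is programmed as one
	 * fast cycle followed by a slow period of the same interval.
	 */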
314 	if (nd_config->n_scan_plans == 2) {
315 		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
316 		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
317 		pno->slow_scan_period =
318 			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
319 	} else if (nd_config->n_scan_plans == 1) {
320 		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
321 		pno->fast_scan_max_cycles = 1;
322 		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
323 	} else {
324 		ath11k_warn(ar->ab, "invalid number of scan plans %d\n",
325 			    nd_config->n_scan_plans);
326 	}
327 
328 	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
329 		/* enable mac randomization */
330 		pno->enable_pno_scan_randomization = 1;
331 		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
332 		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
333 	}
334 
335 	pno->delay_start_time = nd_config->delay;
336 
337 	/* Current FW does not support min-max range for dwell time */
338 	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
339 	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
340 
341 	return 0;
342 }
343 
344 static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
345 				      struct cfg80211_wowlan *wowlan)
346 {
347 	int ret, i;
348 	unsigned long wow_mask = 0;
349 	struct ath11k *ar = arvif->ar;
350 	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
351 	int pattern_id = 0;
352 
353 	/* Set up the requested WOW features */
354 	switch (arvif->vdev_type) {
355 	case WMI_VDEV_TYPE_IBSS:
356 		__set_bit(WOW_BEACON_EVENT, &wow_mask);
357 		fallthrough;
358 	case WMI_VDEV_TYPE_AP:
359 		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
360 		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
361 		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
362 		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
363 		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
364 		__set_bit(WOW_HTT_EVENT, &wow_mask);
365 		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
366 		break;
367 	case WMI_VDEV_TYPE_STA:
368 		if (wowlan->disconnect) {
369 			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
370 			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
371 			__set_bit(WOW_BMISS_EVENT, &wow_mask);
372 			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
373 		}
374 
375 		if (wowlan->magic_pkt)
376 			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
377 
378 		if (wowlan->nd_config) {
379 			struct wmi_pno_scan_req *pno;
380 			int ret;
381 
382 			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
383 			if (!pno)
384 				return -ENOMEM;
385 
386 			ar->nlo_enabled = true;
387 
388 			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
389 							       wowlan->nd_config, pno);
390 			if (!ret) {
391 				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
392 				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
393 			}
394 
395 			kfree(pno);
396 		}
397 		break;
398 	default:
399 		break;
400 	}
401 
402 	for (i = 0; i < wowlan->n_patterns; i++) {
403 		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
404 		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
405 		struct cfg80211_pkt_pattern new_pattern = {};
406 
407 		new_pattern.pattern = ath_pattern;
408 		new_pattern.mask = ath_bitmask;
409 		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
410 			continue;
411 
412 		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
413 		    ATH11K_HW_TXRX_NATIVE_WIFI) {
414 			if (patterns[i].pkt_offset < ETH_HLEN) {
415 				ath11k_wow_convert_8023_to_80211(&new_pattern,
416 								 &patterns[i]);
417 			} else {
418 				int j;
419 
420 				new_pattern = patterns[i];
421 				new_pattern.mask = ath_bitmask;
422 
423 				/* convert bitmask to bytemask */
424 				for (j = 0; j < patterns[i].pattern_len; j++)
425 					if (patterns[i].mask[j / 8] & BIT(j % 8))
426 						ath_bitmask[j] = 0xff;
427 
428 				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
429 			}
430 		}
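		/* Illustrative example (assuming WOW_HDR_LEN is the 24-byte
		 * 3-address header plus the 8-byte RFC 1042 header, i.e. 32
		 * bytes): a pattern starting at 802.3 offset 20, i.e. in the
		 * payload, keeps its bytes and only has its pkt_offset shifted
		 * to 20 + 32 - 14 = 38 in the converted 802.11 frame.
		 */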
431 
432 		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
433 			return -EINVAL;
434 
435 		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
436 						 pattern_id,
437 						 new_pattern.pattern,
438 						 new_pattern.mask,
439 						 new_pattern.pattern_len,
440 						 new_pattern.pkt_offset);
441 		if (ret) {
442 			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
443 				    pattern_id,
444 				    arvif->vdev_id, ret);
445 			return ret;
446 		}
447 
448 		pattern_id++;
449 		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
450 	}
451 
452 	for (i = 0; i < WOW_EVENT_MAX; i++) {
453 		if (!test_bit(i, &wow_mask))
454 			continue;
455 		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
456 		if (ret) {
457 			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
458 				    wow_wakeup_event(i), arvif->vdev_id, ret);
459 			return ret;
460 		}
461 	}
462 
463 	return 0;
464 }
465 
466 static int ath11k_wow_set_wakeups(struct ath11k *ar,
467 				  struct cfg80211_wowlan *wowlan)
468 {
469 	struct ath11k_vif *arvif;
470 	int ret;
471 
472 	lockdep_assert_held(&ar->conf_mutex);
473 
474 	list_for_each_entry(arvif, &ar->arvifs, list) {
475 		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
476 		if (ret) {
477 			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
478 				    arvif->vdev_id, ret);
479 			return ret;
480 		}
481 	}
482 
483 	return 0;
484 }
485 
486 static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
487 {
488 	int ret = 0;
489 	struct ath11k *ar = arvif->ar;
490 
491 	switch (arvif->vdev_type) {
492 	case WMI_VDEV_TYPE_STA:
493 		if (ar->nlo_enabled) {
494 			struct wmi_pno_scan_req *pno;
495 
496 			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
497 			if (!pno)
498 				return -ENOMEM;
499 
500 			pno->enable = 0;
501 			ar->nlo_enabled = false;
502 			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
503 			kfree(pno);
504 		}
505 		break;
506 	default:
507 		break;
508 	}
509 	return ret;
510 }
511 
512 static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
513 {
514 	struct ath11k_vif *arvif;
515 	int ret;
516 
517 	lockdep_assert_held(&ar->conf_mutex);
518 
519 	list_for_each_entry(arvif, &ar->arvifs, list) {
520 		ret = ath11k_vif_wow_clean_nlo(arvif);
521 		if (ret) {
522 			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
523 				    arvif->vdev_id, ret);
524 			return ret;
525 		}
526 	}
527 
528 	return 0;
529 }
530 
531 static int ath11k_wow_set_hw_filter(struct ath11k *ar)
532 {
533 	struct ath11k_vif *arvif;
534 	u32 bitmap;
535 	int ret;
536 
537 	lockdep_assert_held(&ar->conf_mutex);
538 
539 	list_for_each_entry(arvif, &ar->arvifs, list) {
540 		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
541 			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
542 		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
543 						    bitmap,
544 						    true);
545 		if (ret) {
546 			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
547 				    arvif->vdev_id, ret);
548 			return ret;
549 		}
550 	}
551 
552 	return 0;
553 }
554 
555 static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
556 {
557 	struct ath11k_vif *arvif;
558 	int ret;
559 
560 	lockdep_assert_held(&ar->conf_mutex);
561 
562 	list_for_each_entry(arvif, &ar->arvifs, list) {
563 		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
564 
565 		if (ret) {
566 			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
567 				    arvif->vdev_id, ret);
568 			return ret;
569 		}
570 	}
571 
572 	return 0;
573 }
574 
575 static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
576 {
577 	struct ath11k_vif *arvif;
578 	int ret;
579 
580 	lockdep_assert_held(&ar->conf_mutex);
581 
582 	list_for_each_entry(arvif, &ar->arvifs, list) {
583 		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
584 			continue;
585 
586 		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
587 
588 		if (ret) {
589 			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
590 				    arvif->vdev_id, enable, ret);
591 			return ret;
592 		}
593 	}
594 
595 	return 0;
596 }
597 
598 static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
599 {
600 	struct ath11k_vif *arvif;
601 	int ret;
602 
603 	lockdep_assert_held(&ar->conf_mutex);
604 
605 	list_for_each_entry(arvif, &ar->arvifs, list) {
606 		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
607 		    !arvif->is_up ||
608 		    !arvif->rekey_data.enable_offload)
609 			continue;
610 
611 		/* get rekey info before disabling rekey offload */
612 		if (!enable) {
613 			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
614 			if (ret) {
615 				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
616 					    arvif->vdev_id, ret);
617 				return ret;
618 			}
619 		}
620 
621 		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
622 
623 		if (ret) {
624 			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
625 				    arvif->vdev_id, enable, ret);
626 			return ret;
627 		}
628 	}
629 
630 	return 0;
631 }
632 
633 static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
634 {
635 	int ret;
636 
637 	ret = ath11k_wow_arp_ns_offload(ar, enable);
638 	if (ret) {
639 		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
640 			    enable, ret);
641 		return ret;
642 	}
643 
644 	ret = ath11k_gtk_rekey_offload(ar, enable);
645 	if (ret) {
646 		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
647 			    enable, ret);
648 		return ret;
649 	}
650 
651 	return 0;
652 }
653 
654 static int ath11k_wow_set_keepalive(struct ath11k *ar,
655 				    enum wmi_sta_keepalive_method method,
656 				    u32 interval)
657 {
658 	struct ath11k_vif *arvif;
659 	int ret;
660 
661 	lockdep_assert_held(&ar->conf_mutex);
662 
663 	list_for_each_entry(arvif, &ar->arvifs, list) {
664 		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
665 		if (ret)
666 			return ret;
667 	}
668 
669 	return 0;
670 }
671 
672 int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
673 			  struct cfg80211_wowlan *wowlan)
674 {
675 	struct ath11k *ar = hw->priv;
676 	int ret;
677 
678 	ret = ath11k_mac_wait_tx_complete(ar);
679 	if (ret) {
680 		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
681 		return ret;
682 	}
683 
684 	mutex_lock(&ar->conf_mutex);
685 
686 	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
687 	if (ret) {
688 		ath11k_warn(ar->ab,
689 			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
690 			    ret);
691 		goto exit;
692 	}
693 
694 	ret = ath11k_wow_cleanup(ar);
695 	if (ret) {
696 		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
697 			    ret);
698 		goto exit;
699 	}
700 
701 	ret = ath11k_wow_set_wakeups(ar, wowlan);
702 	if (ret) {
703 		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
704 			    ret);
705 		goto cleanup;
706 	}
707 
708 	ret = ath11k_wow_protocol_offload(ar, true);
709 	if (ret) {
710 		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
711 			    ret);
712 		goto cleanup;
713 	}
714 
715 	ret = ath11k_wow_set_hw_filter(ar);
716 	if (ret) {
717 		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
718 			    ret);
719 		goto cleanup;
720 	}
721 
722 	ret = ath11k_wow_set_keepalive(ar,
723 				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
724 				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
725 	if (ret) {
726 		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
727 		goto cleanup;
728 	}
729 
730 	ret = ath11k_wow_enable(ar->ab);
731 	if (ret) {
732 		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
733 		goto cleanup;
734 	}
735 
736 	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
737 	if (ret) {
738 		ath11k_warn(ar->ab,
739 			    "failed to stop dp rx pktlog during wow suspend: %d\n",
740 			    ret);
741 		goto cleanup;
742 	}
743 
744 	ath11k_ce_stop_shadow_timers(ar->ab);
745 	ath11k_dp_stop_shadow_timers(ar->ab);
746 
747 	ath11k_hif_irq_disable(ar->ab);
748 	ath11k_hif_ce_irq_disable(ar->ab);
749 
750 	ret = ath11k_hif_suspend(ar->ab);
751 	if (ret) {
752 		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
753 		goto wakeup;
754 	}
755 
756 	goto exit;
757 
758 wakeup:
759 	ath11k_wow_wakeup(ar->ab);
760 
761 cleanup:
762 	ath11k_wow_cleanup(ar);
763 
764 exit:
765 	mutex_unlock(&ar->conf_mutex);
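	/* Returning 1 (rather than an error code) follows the convention
	 * documented for the mac80211 suspend callback: mac80211 then falls
	 * back to a regular restart/reconfiguration on resume instead of
	 * relying on WoW.
	 */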
766 	return ret ? 1 : 0;
767 }
768 
769 void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
770 {
771 	struct ath11k *ar = hw->priv;
772 
773 	mutex_lock(&ar->conf_mutex);
774 	device_set_wakeup_enable(ar->ab->dev, enabled);
775 	mutex_unlock(&ar->conf_mutex);
776 }
777 
778 int ath11k_wow_op_resume(struct ieee80211_hw *hw)
779 {
780 	struct ath11k *ar = hw->priv;
781 	int ret;
782 
783 	mutex_lock(&ar->conf_mutex);
784 
785 	ret = ath11k_hif_resume(ar->ab);
786 	if (ret) {
787 		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
788 		goto exit;
789 	}
790 
791 	ath11k_hif_ce_irq_enable(ar->ab);
792 	ath11k_hif_irq_enable(ar->ab);
793 
794 	ret = ath11k_dp_rx_pktlog_start(ar->ab);
795 	if (ret) {
796 		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
797 		goto exit;
798 	}
799 
800 	ret = ath11k_wow_wakeup(ar->ab);
801 	if (ret) {
802 		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
803 		goto exit;
804 	}
805 
806 	ret = ath11k_wow_nlo_cleanup(ar);
807 	if (ret) {
808 		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
809 		goto exit;
810 	}
811 
812 	ret = ath11k_wow_clear_hw_filter(ar);
813 	if (ret) {
814 		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
815 		goto exit;
816 	}
817 
818 	ret = ath11k_wow_protocol_offload(ar, false);
819 	if (ret) {
820 		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
821 			    ret);
822 		goto exit;
823 	}
824 
825 	ret = ath11k_wow_set_keepalive(ar,
826 				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
827 				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
828 	if (ret) {
829 		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
830 		goto exit;
831 	}
832 
833 exit:
834 	if (ret) {
835 		switch (ar->state) {
836 		case ATH11K_STATE_ON:
837 			ar->state = ATH11K_STATE_RESTARTING;
838 			ret = 1;
839 			break;
840 		case ATH11K_STATE_OFF:
841 		case ATH11K_STATE_RESTARTING:
842 		case ATH11K_STATE_RESTARTED:
843 		case ATH11K_STATE_WEDGED:
844 		case ATH11K_STATE_FTM:
845 			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
846 				    ar->state);
847 			ret = -EIO;
848 			break;
849 		}
850 	}
851 
852 	mutex_unlock(&ar->conf_mutex);
853 	return ret;
854 }
855 
856 int ath11k_wow_init(struct ath11k *ar)
857 {
858 	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
859 		return 0;
860 
861 	ar->wow.wowlan_support = ath11k_wowlan_support;
862 
863 	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
864 	    ATH11K_HW_TXRX_NATIVE_WIFI) {
865 		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
866 		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
867 	}
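	/* Note on the reduction above (a reading of this driver's pattern
	 * handling, not a firmware statement): in native wifi decap mode the
	 * 802.3 patterns from userspace are rewritten into 802.11 form, which
	 * grows the header portion, so the limits advertised to cfg80211 are
	 * shrunk by WOW_MAX_REDUCE to keep converted patterns within the
	 * firmware's size and offset budget.
	 */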
868 
869 	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
870 		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
871 		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
872 	}
873 
874 	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
875 	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
876 	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
877 
878 	device_set_wakeup_capable(ar->ab->dev, true);
879 
880 	return 0;
881 }
882