// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"

static const struct wiphy_wowlan_support ath11k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

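/* Put the firmware into WoW mode: issue the WMI WoW enable command and
 * wait for the HTC suspend-complete indication, retrying up to
 * ATH11K_WOW_RETRY_NUM times before giving up.
 */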
int ath11k_wow_enable(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int i, ret;

	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath11k_wmi_wow_enable(ar);
		if (ret) {
			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath11k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH11K_WOW_RETRY_WAIT_MS);
	}

	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

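/* Bring the firmware out of WoW mode. Targets that use the SMP2P power
 * save exit message (such as WCN6750) need nothing here; otherwise send
 * the WMI host wakeup indication and wait for the wakeup-completed
 * event from firmware.
 */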
int ath11k_wow_wakeup(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int ret;

	/* In the case of WCN6750, WoW wakeup is done
	 * by sending an SMP2P power save exit message
	 * to the target processor.
	 */
	if (ab->hw_params.smp2p_wow_exit)
		return 0;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

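/* Disable all WoW wakeup events and delete all installed patterns on a
 * single vdev, returning it to a clean non-WoW state.
 */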
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
	struct ath11k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

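/* Run the per-vdev WoW cleanup for every vif on this radio. */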
static int ath11k_wow_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wow_vif_cleanup(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert an 802.3 format to an 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+------+--------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+------+--------+-----------+
 */
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
				      const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
	u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};

	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);
	int i;

	/* convert bitmask to bytemask */
	for (i = 0; i < old->pattern_len; i++)
		if (old->mask[i / 8] & BIT(i % 8))
			bytemask[i] = 0xff;

	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       bytemask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logical link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Compute new pkt_offset */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Compute new hdr end offset */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       bytemask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}

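/* Validate a cfg80211 scheduled scan (net-detect) request and translate
 * it into a WMI PNO scan request: match-set SSIDs, channel list, scan
 * plans and optional MAC address randomization.
 */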
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
					    struct cfg80211_sched_scan_request *nd_config,
					    struct wmi_pno_scan_req *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Fill in the per-profile parameters */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copy the list of valid channels into the request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (pno->a_networks[j].ssid.ssid_len ==
				nd_config->ssids[i].ssid_len &&
			(memcmp(pno->a_networks[j].ssid.ssid,
				nd_config->ssids[i].ssid,
				pno->a_networks[j].ssid.ssid_len) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath11k_warn(ar->ab, "invalid number of scan plans %d\n",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}

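/* Translate the requested WoWLAN configuration into WMI wakeup events
 * and packet patterns for one vif, converting patterns from 802.3 to
 * 802.11 layout when the target runs in native wifi decap mode.
 */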
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath11k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Set up the requested WoW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;

		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
		    ATH11K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN) {
				ath11k_wow_convert_8023_to_80211(&new_pattern,
								 &patterns[i]);
			} else {
				int j;

				new_pattern = patterns[i];
				new_pattern.mask = ath_bitmask;

				/* convert bitmask to bytemask */
				for (j = 0; j < patterns[i].pattern_len; j++)
					if (patterns[i].mask[j / 8] & BIT(j % 8))
						ath_bitmask[j] = 0xff;

				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
			}
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

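/* Apply the WoWLAN wakeup configuration to every vif on this radio. */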
static int ath11k_wow_set_wakeups(struct ath11k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

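/* Disable PNO on a station vdev if it was enabled while entering WoW. */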
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
	int ret = 0;
	struct ath11k *ar = arvif->ar;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (ar->nlo_enabled) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			pno->enable = 0;
			ar->nlo_enabled = false;
			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
			kfree(pno);
		}
		break;
	default:
		break;
	}
	return ret;
}

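/* Clear the PNO configuration on all vifs after waking up from WoW. */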
static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_clean_nlo(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

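/* Enable the firmware hardware data filter so that, while in WoW,
 * broadcast frames other than ARP and multicast frames other than
 * ICMPv6 are dropped; ath11k_wow_clear_hw_filter() undoes this on
 * resume.
 */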
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	u32 bitmap;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
						    bitmap,
						    true);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);

		if (ret) {
			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

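/* Enable or disable ARP and NS offload on all station vifs so the
 * firmware can answer ARP requests and IPv6 neighbour solicitations on
 * behalf of the host while it is asleep.
 */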
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

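/* Enable or disable GTK rekey offload on associated station vifs that
 * requested it. When disabling, the current rekey info is fetched from
 * firmware first.
 */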
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disabling rekey offload */
		if (!enable) {
			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
	int ret;

	ret = ath11k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath11k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

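/* Set the firmware station keepalive method and interval on every vif.
 * Suspend enables a periodic NULL-frame keepalive; resume disables it
 * again.
 */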
static int ath11k_wow_set_keepalive(struct ath11k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}

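/* mac80211 suspend handler: drain tx, stop pktlog, program the wakeup
 * events, offloads, hw filter and keepalive, then enable WoW in the
 * firmware and suspend the HIF layer. Any failure is reported to
 * mac80211 as a non-zero return value.
 */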
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath11k *ar = hw->priv;
	int ret;

	ret = ath11k_mac_wait_tx_complete(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		return ret;
	}

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_protocol_offload(ar, true);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_wow_enable(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx pktlog during wow suspend: %d\n",
			    ret);
		goto cleanup;
	}

	ath11k_ce_stop_shadow_timers(ar->ab);
	ath11k_dp_stop_shadow_timers(ar->ab);

	ath11k_hif_irq_disable(ar->ab);
	ath11k_hif_ce_irq_disable(ar->ab);

	ret = ath11k_hif_suspend(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath11k_wow_wakeup(ar->ab);

cleanup:
	ath11k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}

void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath11k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	device_set_wakeup_enable(ar->ab->dev, enabled);
	mutex_unlock(&ar->conf_mutex);
}

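/* mac80211 resume handler: resume the HIF layer, re-enable interrupts
 * and pktlog, wake the firmware out of WoW and undo the suspend-time
 * configuration (PNO, hw filter, offloads, keepalive). On failure the
 * device is either marked for restart or reported as unrecoverable.
 */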
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_hif_resume(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath11k_hif_ce_irq_enable(ar->ab);
	ath11k_hif_irq_enable(ar->ab);

	ret = ath11k_dp_rx_pktlog_start(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_wakeup(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_nlo_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_clear_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_protocol_offload(ar, false);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ar->state) {
		case ATH11K_STATE_ON:
			ar->state = ATH11K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH11K_STATE_OFF:
		case ATH11K_STATE_RESTARTING:
		case ATH11K_STATE_RESTARTED:
		case ATH11K_STATE_WEDGED:
		case ATH11K_STATE_FTM:
			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}

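/* Advertise WoWLAN capabilities to mac80211 when the firmware supports
 * WoW: shrink the pattern limits in native wifi decap mode, enable
 * net-detect if the NLO service is present and mark the device as
 * wakeup capable.
 */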
int ath11k_wow_init(struct ath11k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath11k_wowlan_support;

	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
	    ATH11K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}