xref: /freebsd/sys/contrib/dev/athk/ath12k/wow.c (revision a96550206e4bde15bf615ff2127b80404a7ec41f)
1*a9655020SBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2*a9655020SBjoern A. Zeeb /*
3*a9655020SBjoern A. Zeeb  * Copyright (c) 2020 The Linux Foundation. All rights reserved.
4*a9655020SBjoern A. Zeeb  * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5*a9655020SBjoern A. Zeeb  */
6*a9655020SBjoern A. Zeeb 
7*a9655020SBjoern A. Zeeb #include <linux/delay.h>
8*a9655020SBjoern A. Zeeb #include <linux/inetdevice.h>
9*a9655020SBjoern A. Zeeb #include <net/addrconf.h>
10*a9655020SBjoern A. Zeeb #include <net/if_inet6.h>
11*a9655020SBjoern A. Zeeb #include <net/ipv6.h>
12*a9655020SBjoern A. Zeeb 
13*a9655020SBjoern A. Zeeb #include "mac.h"
14*a9655020SBjoern A. Zeeb 
15*a9655020SBjoern A. Zeeb #include <net/mac80211.h>
16*a9655020SBjoern A. Zeeb #include "core.h"
17*a9655020SBjoern A. Zeeb #include "hif.h"
18*a9655020SBjoern A. Zeeb #include "debug.h"
19*a9655020SBjoern A. Zeeb #include "wmi.h"
20*a9655020SBjoern A. Zeeb #include "wow.h"
21*a9655020SBjoern A. Zeeb 
/* WoW capabilities advertised to cfg80211/mac80211 for this driver:
 * wake on disconnect, magic packet, GTK rekey (and rekey failure),
 * plus user patterns bounded by the WOW_*_PATTERN_SIZE limits below.
 */
static const struct wiphy_wowlan_support ath12k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
31*a9655020SBjoern A. Zeeb 
ath12k_wow_is_p2p_vdev(struct ath12k_vif * ahvif)32*a9655020SBjoern A. Zeeb static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
33*a9655020SBjoern A. Zeeb {
34*a9655020SBjoern A. Zeeb 	return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
35*a9655020SBjoern A. Zeeb 		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
36*a9655020SBjoern A. Zeeb 		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
37*a9655020SBjoern A. Zeeb }
38*a9655020SBjoern A. Zeeb 
/* Put the firmware into WoW suspend.
 *
 * Sends the WMI wow-enable command and waits for the HTC suspend-complete
 * indication, retrying up to ATH12K_WOW_RETRY_NUM times when the firmware
 * NACKs the suspend.
 *
 * Returns 0 on success, a negative errno from the WMI layer, or
 * -ETIMEDOUT when no suspend completion ever arrives.
 */
int ath12k_wow_enable(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int i, ret;

	clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	/* The firmware might be busy and it can not enter WoW immediately.
	 * In that case firmware notifies host with
	 * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
	 * later. Per the firmware team there could be up to 10 loops.
	 */
	for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
		/* re-arm the completion before issuing the command so the
		 * firmware's response cannot be missed
		 */
		reinit_completion(&ab->htc_suspend);

		ret = ath12k_wmi_wow_enable(ar);
		if (ret) {
			ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		/* wait_for_completion_timeout() returns 0 only on timeout */
		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath12k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		/* firmware answered but NACKed the suspend; back off and retry */
		ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH12K_WOW_RETRY_WAIT_MS);
	}

	ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}
80*a9655020SBjoern A. Zeeb 
ath12k_wow_wakeup(struct ath12k * ar)81*a9655020SBjoern A. Zeeb int ath12k_wow_wakeup(struct ath12k *ar)
82*a9655020SBjoern A. Zeeb {
83*a9655020SBjoern A. Zeeb 	struct ath12k_base *ab = ar->ab;
84*a9655020SBjoern A. Zeeb 	int ret;
85*a9655020SBjoern A. Zeeb 
86*a9655020SBjoern A. Zeeb 	reinit_completion(&ab->wow.wakeup_completed);
87*a9655020SBjoern A. Zeeb 
88*a9655020SBjoern A. Zeeb 	ret = ath12k_wmi_wow_host_wakeup_ind(ar);
89*a9655020SBjoern A. Zeeb 	if (ret) {
90*a9655020SBjoern A. Zeeb 		ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
91*a9655020SBjoern A. Zeeb 			    ret);
92*a9655020SBjoern A. Zeeb 		return ret;
93*a9655020SBjoern A. Zeeb 	}
94*a9655020SBjoern A. Zeeb 
95*a9655020SBjoern A. Zeeb 	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
96*a9655020SBjoern A. Zeeb 	if (ret == 0) {
97*a9655020SBjoern A. Zeeb 		ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
98*a9655020SBjoern A. Zeeb 		return -ETIMEDOUT;
99*a9655020SBjoern A. Zeeb 	}
100*a9655020SBjoern A. Zeeb 
101*a9655020SBjoern A. Zeeb 	return 0;
102*a9655020SBjoern A. Zeeb }
103*a9655020SBjoern A. Zeeb 
ath12k_wow_vif_cleanup(struct ath12k_link_vif * arvif)104*a9655020SBjoern A. Zeeb static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
105*a9655020SBjoern A. Zeeb {
106*a9655020SBjoern A. Zeeb 	struct ath12k *ar = arvif->ar;
107*a9655020SBjoern A. Zeeb 	int i, ret;
108*a9655020SBjoern A. Zeeb 
109*a9655020SBjoern A. Zeeb 	for (i = 0; i < WOW_EVENT_MAX; i++) {
110*a9655020SBjoern A. Zeeb 		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
111*a9655020SBjoern A. Zeeb 		if (ret) {
112*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
113*a9655020SBjoern A. Zeeb 				    wow_wakeup_event(i), arvif->vdev_id, ret);
114*a9655020SBjoern A. Zeeb 			return ret;
115*a9655020SBjoern A. Zeeb 		}
116*a9655020SBjoern A. Zeeb 	}
117*a9655020SBjoern A. Zeeb 
118*a9655020SBjoern A. Zeeb 	for (i = 0; i < ar->wow.max_num_patterns; i++) {
119*a9655020SBjoern A. Zeeb 		ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
120*a9655020SBjoern A. Zeeb 		if (ret) {
121*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
122*a9655020SBjoern A. Zeeb 				    i, arvif->vdev_id, ret);
123*a9655020SBjoern A. Zeeb 			return ret;
124*a9655020SBjoern A. Zeeb 		}
125*a9655020SBjoern A. Zeeb 	}
126*a9655020SBjoern A. Zeeb 
127*a9655020SBjoern A. Zeeb 	return 0;
128*a9655020SBjoern A. Zeeb }
129*a9655020SBjoern A. Zeeb 
ath12k_wow_cleanup(struct ath12k * ar)130*a9655020SBjoern A. Zeeb static int ath12k_wow_cleanup(struct ath12k *ar)
131*a9655020SBjoern A. Zeeb {
132*a9655020SBjoern A. Zeeb 	struct ath12k_link_vif *arvif;
133*a9655020SBjoern A. Zeeb 	int ret;
134*a9655020SBjoern A. Zeeb 
135*a9655020SBjoern A. Zeeb 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
136*a9655020SBjoern A. Zeeb 
137*a9655020SBjoern A. Zeeb 	list_for_each_entry(arvif, &ar->arvifs, list) {
138*a9655020SBjoern A. Zeeb 		ret = ath12k_wow_vif_cleanup(arvif);
139*a9655020SBjoern A. Zeeb 		if (ret) {
140*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
141*a9655020SBjoern A. Zeeb 				    arvif->vdev_id, ret);
142*a9655020SBjoern A. Zeeb 			return ret;
143*a9655020SBjoern A. Zeeb 		}
144*a9655020SBjoern A. Zeeb 	}
145*a9655020SBjoern A. Zeeb 
146*a9655020SBjoern A. Zeeb 	return 0;
147*a9655020SBjoern A. Zeeb }
148*a9655020SBjoern A. Zeeb 
149*a9655020SBjoern A. Zeeb /* Convert a 802.3 format to a 802.11 format.
150*a9655020SBjoern A. Zeeb  *         +------------+-----------+--------+----------------+
151*a9655020SBjoern A. Zeeb  * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
152*a9655020SBjoern A. Zeeb  *         +------------+-----------+--------+----------------+
153*a9655020SBjoern A. Zeeb  *                |__         |_______    |____________  |________
154*a9655020SBjoern A. Zeeb  *                   |                |                |          |
155*a9655020SBjoern A. Zeeb  *         +--+------------+----+-----------+---------------+-----------+
156*a9655020SBjoern A. Zeeb  * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
157*a9655020SBjoern A. Zeeb  *         +--+------------+----+-----------+---------------+-----------+
158*a9655020SBjoern A. Zeeb  */
/* Translate an Ethernet-frame WoW pattern into 802.11 frame coordinates.
 *
 * The 802.3 header fields map into the 802.11 header as shown in the
 * diagram above: dest mac -> addr1, src mac -> addr3, and the ethertype
 * lands after the 802.11 header plus the RFC 1042 LLC/SNAP header.
 * Three cases are handled, keyed on where the pattern starts in the
 * Ethernet frame. Output pattern/bytemask/offset are written into
 * @i80211_pattern; no errors are reported (callers bound pattern_len
 * against WOW_MAX_PATTERN_SIZE).
 */
static void
ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
				 const struct cfg80211_pkt_pattern *eth_pattern,
				 struct ath12k_pkt_pattern *i80211_pattern)
{
	size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
	size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
	size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
	size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
	size_t prot_ofs = offsetof(struct ethhdr, h_proto);
	size_t src_ofs = offsetof(struct ethhdr, h_source);
	u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
	const u8 *eth_pat = eth_pattern->pattern;
	size_t eth_pat_len = eth_pattern->pattern_len;
	size_t eth_pkt_ofs = eth_pattern->pkt_offset;
	u8 *bytemask = i80211_pattern->bytemask;
	u8 *pat = i80211_pattern->pattern;
	size_t pat_len = 0;
	size_t pkt_ofs = 0;
	size_t delta;
	int i;

	/* convert bitmask to bytemask */
	for (i = 0; i < eth_pat_len; i++)
		if (eth_pattern->mask[i / 8] & BIT(i % 8))
			eth_bytemask[i] = 0xff;

	/* Case 1: pattern starts inside the destination MAC (maps to A1) */
	if (eth_pkt_ofs < ETH_ALEN) {
		pkt_ofs = eth_pkt_ofs + a1_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
			/* pattern ends inside the dest mac: copy through */
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			/* pattern runs into the src mac, which lands in A3;
			 * copy the dest-mac part then the src-mac tail
			 */
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - src_ofs;
			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       delta);

			pat_len = a3_ofs - pkt_ofs + delta;
		} else {
			/* pattern reaches the ethertype/body: dest mac -> A1,
			 * full src mac -> A3, remainder goes after the 802.11
			 * header + RFC 1042 header
			 */
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else if (eth_pkt_ofs < prot_ofs) {
		/* Case 2: pattern starts inside the source MAC (maps to A3) */
		pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			/* pattern ends inside the src mac: copy through */
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else {
			/* src-mac part -> A3, remainder after header + LLC/SNAP */
			memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat +  prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len =  i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else {
		/* Case 3: pattern starts at/after the ethertype; bytes copy
		 * through unchanged, only the packet offset is rebased
		 */
		pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;

		memcpy(pat, eth_pat, eth_pat_len);
		memcpy(bytemask, eth_bytemask, eth_pat_len);

		pat_len = eth_pat_len;
	}

	i80211_pattern->pattern_len = pat_len;
	i80211_pattern->pkt_offset = pkt_ofs;
}
262*a9655020SBjoern A. Zeeb 
/* Validate a cfg80211 scheduled-scan (net-detect) request and convert it
 * into a firmware PNO scan request for @vdev_id.
 *
 * Returns 0 on success; -EINVAL when there are no match sets, too many
 * match sets or channels, or an SSID length is out of range.
 */
static int
ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
				 const struct cfg80211_sched_scan_request *nd_config,
				 struct wmi_pno_scan_req_arg *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		/* 32 is the 802.11 maximum SSID length */
		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	/* mark match-set networks that also appear in the probed-SSID list
	 * as hidden, so the firmware probes for them actively
	 */
	for (i = 0; i < nd_config->n_ssids; i++) {
		for (j = 0; j < pno->uc_networks_count; j++) {
			if (pno->a_networks[j].ssid.ssid_len ==
				nd_config->ssids[i].ssid_len &&
			    !memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    pno->a_networks[j].ssid.ssid_len)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
		}
	}

	/* two plans: fast cycles then a slow period; one plan: a single
	 * cycle at the given interval. Any other count is only warned
	 * about and leaves the zeroed defaults in place (still returns 0).
	 */
	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}
356*a9655020SBjoern A. Zeeb 
/* Program the WoW wakeup configuration for one vif: select wakeup events
 * by vdev type (and by the user's wowlan request for stations), install
 * user packet patterns, then arm each selected wakeup event in firmware.
 *
 * Returns 0 on success or the first WMI/allocation error encountered.
 */
static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	struct ath12k *ar = arvif->ar;
	unsigned long wow_mask = 0;
	int pattern_id = 0;
	int ret, i, j;

	/* Setup requested WOW features */
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		/* AP (and IBSS) always wake on connection-management frames */
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		/* station wakeups follow the user's wowlan request */
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req_arg *pno;
			/* NOTE(review): this 'ret' shadows the outer one, so
			 * a PNO conversion failure is not propagated to the
			 * caller -- confirm this is intentional best-effort
			 */
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				/* NOTE(review): ath12k_wmi_wow_config_pno()
				 * result is ignored here -- verify best-effort
				 * is the intended behavior
				 */
				ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	/* install user-supplied packet patterns */
	for (i = 0; i < wowlan->n_patterns; i++) {
		const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
		struct ath12k_pkt_pattern new_pattern = {};

		if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		if (ar->ab->wow.wmi_conf_rx_decap_mode ==
		    ATH12K_HW_TXRX_NATIVE_WIFI) {
			/* firmware matches against 802.11 frames: translate
			 * the Ethernet-coordinate pattern first
			 */
			ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
							 &new_pattern);

			if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
				return -EINVAL;
		} else {
			memcpy(new_pattern.pattern, eth_pattern->pattern,
			       eth_pattern->pattern_len);

			/* convert bitmask to bytemask */
			for (j = 0; j < eth_pattern->pattern_len; j++)
				if (eth_pattern->mask[j / 8] & BIT(j % 8))
					new_pattern.bytemask[j] = 0xff;

			new_pattern.pattern_len = eth_pattern->pattern_len;
			new_pattern.pkt_offset = eth_pattern->pkt_offset;
		}

		ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.bytemask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	/* finally, arm every wakeup event selected above */
	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
472*a9655020SBjoern A. Zeeb 
ath12k_wow_set_wakeups(struct ath12k * ar,struct cfg80211_wowlan * wowlan)473*a9655020SBjoern A. Zeeb static int ath12k_wow_set_wakeups(struct ath12k *ar,
474*a9655020SBjoern A. Zeeb 				  struct cfg80211_wowlan *wowlan)
475*a9655020SBjoern A. Zeeb {
476*a9655020SBjoern A. Zeeb 	struct ath12k_link_vif *arvif;
477*a9655020SBjoern A. Zeeb 	int ret;
478*a9655020SBjoern A. Zeeb 
479*a9655020SBjoern A. Zeeb 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
480*a9655020SBjoern A. Zeeb 
481*a9655020SBjoern A. Zeeb 	list_for_each_entry(arvif, &ar->arvifs, list) {
482*a9655020SBjoern A. Zeeb 		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
483*a9655020SBjoern A. Zeeb 			continue;
484*a9655020SBjoern A. Zeeb 		ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
485*a9655020SBjoern A. Zeeb 		if (ret) {
486*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
487*a9655020SBjoern A. Zeeb 				    arvif->vdev_id, ret);
488*a9655020SBjoern A. Zeeb 			return ret;
489*a9655020SBjoern A. Zeeb 		}
490*a9655020SBjoern A. Zeeb 	}
491*a9655020SBjoern A. Zeeb 
492*a9655020SBjoern A. Zeeb 	return 0;
493*a9655020SBjoern A. Zeeb }
494*a9655020SBjoern A. Zeeb 
/* Disable PNO (network-list offload) on @vdev_id if it was enabled,
 * clearing the radio's nlo_enabled flag on success.
 */
static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_pno_scan_req_arg *req;
	int ret;

	if (!ar->nlo_enabled)
		return 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->enable = 0;
	ret = ath12k_wmi_wow_config_pno(ar, vdev_id, req);
	if (ret)
		ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
	else
		ar->nlo_enabled = false;

	kfree(req);
	return ret;
}
520*a9655020SBjoern A. Zeeb 
ath12k_wow_vif_clean_nlo(struct ath12k_link_vif * arvif)521*a9655020SBjoern A. Zeeb static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
522*a9655020SBjoern A. Zeeb {
523*a9655020SBjoern A. Zeeb 	struct ath12k *ar = arvif->ar;
524*a9655020SBjoern A. Zeeb 
525*a9655020SBjoern A. Zeeb 	switch (arvif->ahvif->vdev_type) {
526*a9655020SBjoern A. Zeeb 	case WMI_VDEV_TYPE_STA:
527*a9655020SBjoern A. Zeeb 		return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
528*a9655020SBjoern A. Zeeb 	default:
529*a9655020SBjoern A. Zeeb 		return 0;
530*a9655020SBjoern A. Zeeb 	}
531*a9655020SBjoern A. Zeeb }
532*a9655020SBjoern A. Zeeb 
ath12k_wow_nlo_cleanup(struct ath12k * ar)533*a9655020SBjoern A. Zeeb static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
534*a9655020SBjoern A. Zeeb {
535*a9655020SBjoern A. Zeeb 	struct ath12k_link_vif *arvif;
536*a9655020SBjoern A. Zeeb 	int ret;
537*a9655020SBjoern A. Zeeb 
538*a9655020SBjoern A. Zeeb 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
539*a9655020SBjoern A. Zeeb 
540*a9655020SBjoern A. Zeeb 	list_for_each_entry(arvif, &ar->arvifs, list) {
541*a9655020SBjoern A. Zeeb 		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
542*a9655020SBjoern A. Zeeb 			continue;
543*a9655020SBjoern A. Zeeb 
544*a9655020SBjoern A. Zeeb 		ret = ath12k_wow_vif_clean_nlo(arvif);
545*a9655020SBjoern A. Zeeb 		if (ret) {
546*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
547*a9655020SBjoern A. Zeeb 				    arvif->vdev_id, ret);
548*a9655020SBjoern A. Zeeb 			return ret;
549*a9655020SBjoern A. Zeeb 		}
550*a9655020SBjoern A. Zeeb 	}
551*a9655020SBjoern A. Zeeb 
552*a9655020SBjoern A. Zeeb 	return 0;
553*a9655020SBjoern A. Zeeb }
554*a9655020SBjoern A. Zeeb 
ath12k_wow_set_hw_filter(struct ath12k * ar)555*a9655020SBjoern A. Zeeb static int ath12k_wow_set_hw_filter(struct ath12k *ar)
556*a9655020SBjoern A. Zeeb {
557*a9655020SBjoern A. Zeeb 	struct wmi_hw_data_filter_arg arg;
558*a9655020SBjoern A. Zeeb 	struct ath12k_link_vif *arvif;
559*a9655020SBjoern A. Zeeb 	int ret;
560*a9655020SBjoern A. Zeeb 
561*a9655020SBjoern A. Zeeb 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
562*a9655020SBjoern A. Zeeb 
563*a9655020SBjoern A. Zeeb 	list_for_each_entry(arvif, &ar->arvifs, list) {
564*a9655020SBjoern A. Zeeb 		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
565*a9655020SBjoern A. Zeeb 			continue;
566*a9655020SBjoern A. Zeeb 
567*a9655020SBjoern A. Zeeb 		arg.vdev_id = arvif->vdev_id;
568*a9655020SBjoern A. Zeeb 		arg.enable = true;
569*a9655020SBjoern A. Zeeb 		arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
570*a9655020SBjoern A. Zeeb 		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
571*a9655020SBjoern A. Zeeb 		if (ret) {
572*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
573*a9655020SBjoern A. Zeeb 				    arvif->vdev_id, ret);
574*a9655020SBjoern A. Zeeb 			return ret;
575*a9655020SBjoern A. Zeeb 		}
576*a9655020SBjoern A. Zeeb 	}
577*a9655020SBjoern A. Zeeb 
578*a9655020SBjoern A. Zeeb 	return 0;
579*a9655020SBjoern A. Zeeb }
580*a9655020SBjoern A. Zeeb 
ath12k_wow_clear_hw_filter(struct ath12k * ar)581*a9655020SBjoern A. Zeeb static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
582*a9655020SBjoern A. Zeeb {
583*a9655020SBjoern A. Zeeb 	struct wmi_hw_data_filter_arg arg;
584*a9655020SBjoern A. Zeeb 	struct ath12k_link_vif *arvif;
585*a9655020SBjoern A. Zeeb 	int ret;
586*a9655020SBjoern A. Zeeb 
587*a9655020SBjoern A. Zeeb 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
588*a9655020SBjoern A. Zeeb 
589*a9655020SBjoern A. Zeeb 	list_for_each_entry(arvif, &ar->arvifs, list) {
590*a9655020SBjoern A. Zeeb 		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
591*a9655020SBjoern A. Zeeb 			continue;
592*a9655020SBjoern A. Zeeb 
593*a9655020SBjoern A. Zeeb 		arg.vdev_id = arvif->vdev_id;
594*a9655020SBjoern A. Zeeb 		arg.enable = false;
595*a9655020SBjoern A. Zeeb 		arg.hw_filter_bitmap = 0;
596*a9655020SBjoern A. Zeeb 		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
597*a9655020SBjoern A. Zeeb 
598*a9655020SBjoern A. Zeeb 		if (ret) {
599*a9655020SBjoern A. Zeeb 			ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
600*a9655020SBjoern A. Zeeb 				    arvif->vdev_id, ret);
601*a9655020SBjoern A. Zeeb 			return ret;
602*a9655020SBjoern A. Zeeb 		}
603*a9655020SBjoern A. Zeeb 	}
604*a9655020SBjoern A. Zeeb 
605*a9655020SBjoern A. Zeeb 	return 0;
606*a9655020SBjoern A. Zeeb }
607*a9655020SBjoern A. Zeeb 
ath12k_wow_generate_ns_mc_addr(struct ath12k_base * ab,struct wmi_arp_ns_offload_arg * offload)608*a9655020SBjoern A. Zeeb static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
609*a9655020SBjoern A. Zeeb 					   struct wmi_arp_ns_offload_arg *offload)
610*a9655020SBjoern A. Zeeb {
611*a9655020SBjoern A. Zeeb 	int i;
612*a9655020SBjoern A. Zeeb 
613*a9655020SBjoern A. Zeeb 	for (i = 0; i < offload->ipv6_count; i++) {
614*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][0] = 0xff;
615*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][1] = 0x02;
616*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][11] = 0x01;
617*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][12] = 0xff;
618*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][13] =
619*a9655020SBjoern A. Zeeb 					offload->ipv6_addr[i][13];
620*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][14] =
621*a9655020SBjoern A. Zeeb 					offload->ipv6_addr[i][14];
622*a9655020SBjoern A. Zeeb 		offload->self_ipv6_addr[i][15] =
623*a9655020SBjoern A. Zeeb 					offload->ipv6_addr[i][15];
624*a9655020SBjoern A. Zeeb 		ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
625*a9655020SBjoern A. Zeeb 			   offload->self_ipv6_addr[i]);
626*a9655020SBjoern A. Zeeb 	}
627*a9655020SBjoern A. Zeeb }
628*a9655020SBjoern A. Zeeb 
ath12k_wow_prepare_ns_offload(struct ath12k_link_vif * arvif,struct wmi_arp_ns_offload_arg * offload)629*a9655020SBjoern A. Zeeb static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
630*a9655020SBjoern A. Zeeb 					  struct wmi_arp_ns_offload_arg *offload)
631*a9655020SBjoern A. Zeeb {
632*a9655020SBjoern A. Zeeb 	struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
633*a9655020SBjoern A. Zeeb 	struct ath12k_base *ab = arvif->ar->ab;
634*a9655020SBjoern A. Zeeb 	struct inet6_ifaddr *ifa6;
635*a9655020SBjoern A. Zeeb 	struct ifacaddr6 *ifaca6;
636*a9655020SBjoern A. Zeeb 	struct inet6_dev *idev;
637*a9655020SBjoern A. Zeeb 	u32 count = 0, scope;
638*a9655020SBjoern A. Zeeb 
639*a9655020SBjoern A. Zeeb 	if (!ndev)
640*a9655020SBjoern A. Zeeb 		return;
641*a9655020SBjoern A. Zeeb 
642*a9655020SBjoern A. Zeeb 	idev = in6_dev_get(ndev);
643*a9655020SBjoern A. Zeeb 	if (!idev)
644*a9655020SBjoern A. Zeeb 		return;
645*a9655020SBjoern A. Zeeb 
646*a9655020SBjoern A. Zeeb 	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");
647*a9655020SBjoern A. Zeeb 
648*a9655020SBjoern A. Zeeb 	read_lock_bh(&idev->lock);
649*a9655020SBjoern A. Zeeb 
650*a9655020SBjoern A. Zeeb 	/* get unicast address */
651*a9655020SBjoern A. Zeeb 	list_for_each_entry(ifa6, &idev->addr_list, if_list) {
652*a9655020SBjoern A. Zeeb 		if (count >= WMI_IPV6_MAX_COUNT)
653*a9655020SBjoern A. Zeeb 			goto unlock;
654*a9655020SBjoern A. Zeeb 
655*a9655020SBjoern A. Zeeb 		if (ifa6->flags & IFA_F_DADFAILED)
656*a9655020SBjoern A. Zeeb 			continue;
657*a9655020SBjoern A. Zeeb 
658*a9655020SBjoern A. Zeeb 		scope = ipv6_addr_src_scope(&ifa6->addr);
659*a9655020SBjoern A. Zeeb 		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
660*a9655020SBjoern A. Zeeb 		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
661*a9655020SBjoern A. Zeeb 			ath12k_dbg(ab, ATH12K_DBG_WOW,
662*a9655020SBjoern A. Zeeb 				   "Unsupported ipv6 scope: %d\n", scope);
663*a9655020SBjoern A. Zeeb 			continue;
664*a9655020SBjoern A. Zeeb 		}
665*a9655020SBjoern A. Zeeb 
666*a9655020SBjoern A. Zeeb 		memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
667*a9655020SBjoern A. Zeeb 		       sizeof(ifa6->addr.s6_addr));
668*a9655020SBjoern A. Zeeb 		offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
669*a9655020SBjoern A. Zeeb 		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
670*a9655020SBjoern A. Zeeb 			   count, offload->ipv6_addr[count],
671*a9655020SBjoern A. Zeeb 			   scope);
672*a9655020SBjoern A. Zeeb 		count++;
673*a9655020SBjoern A. Zeeb 	}
674*a9655020SBjoern A. Zeeb 
675*a9655020SBjoern A. Zeeb 	/* get anycast address */
676*a9655020SBjoern A. Zeeb 	rcu_read_lock();
677*a9655020SBjoern A. Zeeb 
678*a9655020SBjoern A. Zeeb 	for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
679*a9655020SBjoern A. Zeeb 	     ifaca6 = rcu_dereference(ifaca6->aca_next)) {
680*a9655020SBjoern A. Zeeb 		if (count >= WMI_IPV6_MAX_COUNT) {
681*a9655020SBjoern A. Zeeb 			rcu_read_unlock();
682*a9655020SBjoern A. Zeeb 			goto unlock;
683*a9655020SBjoern A. Zeeb 		}
684*a9655020SBjoern A. Zeeb 
685*a9655020SBjoern A. Zeeb 		scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
686*a9655020SBjoern A. Zeeb 		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
687*a9655020SBjoern A. Zeeb 		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
688*a9655020SBjoern A. Zeeb 			ath12k_dbg(ab, ATH12K_DBG_WOW,
689*a9655020SBjoern A. Zeeb 				   "Unsupported ipv scope: %d\n", scope);
690*a9655020SBjoern A. Zeeb 			continue;
691*a9655020SBjoern A. Zeeb 		}
692*a9655020SBjoern A. Zeeb 
693*a9655020SBjoern A. Zeeb 		memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
694*a9655020SBjoern A. Zeeb 		       sizeof(ifaca6->aca_addr));
695*a9655020SBjoern A. Zeeb 		offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
696*a9655020SBjoern A. Zeeb 		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
697*a9655020SBjoern A. Zeeb 			   count, offload->ipv6_addr[count],
698*a9655020SBjoern A. Zeeb 			   scope);
699*a9655020SBjoern A. Zeeb 		count++;
700*a9655020SBjoern A. Zeeb 	}
701*a9655020SBjoern A. Zeeb 
702*a9655020SBjoern A. Zeeb 	rcu_read_unlock();
703*a9655020SBjoern A. Zeeb 
704*a9655020SBjoern A. Zeeb unlock:
705*a9655020SBjoern A. Zeeb 	read_unlock_bh(&idev->lock);
706*a9655020SBjoern A. Zeeb 
707*a9655020SBjoern A. Zeeb 	in6_dev_put(idev);
708*a9655020SBjoern A. Zeeb 
709*a9655020SBjoern A. Zeeb 	offload->ipv6_count = count;
710*a9655020SBjoern A. Zeeb 	ath12k_wow_generate_ns_mc_addr(ab, offload);
711*a9655020SBjoern A. Zeeb }
712*a9655020SBjoern A. Zeeb 
ath12k_wow_prepare_arp_offload(struct ath12k_link_vif * arvif,struct wmi_arp_ns_offload_arg * offload)713*a9655020SBjoern A. Zeeb static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
714*a9655020SBjoern A. Zeeb 					   struct wmi_arp_ns_offload_arg *offload)
715*a9655020SBjoern A. Zeeb {
716*a9655020SBjoern A. Zeeb 	struct ieee80211_vif *vif = arvif->ahvif->vif;
717*a9655020SBjoern A. Zeeb 	struct ieee80211_vif_cfg vif_cfg = vif->cfg;
718*a9655020SBjoern A. Zeeb 	struct ath12k_base *ab = arvif->ar->ab;
719*a9655020SBjoern A. Zeeb 	u32 ipv4_cnt;
720*a9655020SBjoern A. Zeeb 
721*a9655020SBjoern A. Zeeb 	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
722*a9655020SBjoern A. Zeeb 
723*a9655020SBjoern A. Zeeb 	ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
724*a9655020SBjoern A. Zeeb 	memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
725*a9655020SBjoern A. Zeeb 	offload->ipv4_count = ipv4_cnt;
726*a9655020SBjoern A. Zeeb 
727*a9655020SBjoern A. Zeeb 	ath12k_dbg(ab, ATH12K_DBG_WOW,
728*a9655020SBjoern A. Zeeb 		   "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
729*a9655020SBjoern A. Zeeb 		   vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
730*a9655020SBjoern A. Zeeb }
731*a9655020SBjoern A. Zeeb 
/* Enable or disable ARP/NS protocol offload on all STA vdevs.
 * A single scratch buffer is heap-allocated (too large for the stack)
 * and reused, zeroed, for each vdev.  Returns 0 or the first error.
 */
static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
{
	struct wmi_arp_ns_offload_arg *offload;
	struct ath12k_link_vif *arvif;
	int ret = 0;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	offload = kmalloc(sizeof(*offload), GFP_KERNEL);
	if (!offload)
		return -ENOMEM;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		memset(offload, 0, sizeof(*offload));

		memcpy(offload->mac_addr, arvif->ahvif->vif->addr, ETH_ALEN);
		ath12k_wow_prepare_ns_offload(arvif, offload);
		ath12k_wow_prepare_arp_offload(arvif, offload);

		ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			break;
		}
	}

	kfree(offload);

	return ret;
}
770*a9655020SBjoern A. Zeeb 
/* Enable or disable GTK rekey offload on all eligible STA vdevs.
 *
 * When disabling (on resume), the current rekey info is fetched from
 * firmware first so the updated replay counters are not lost.
 *
 * Fix: the warning message misspelled "rekey" as "reky".
 */
static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		/* only associated STA vdevs with userspace-supplied rekey
		 * data participate in the offload
		 */
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disable rekey offload */
		if (!enable) {
			ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}
805*a9655020SBjoern A. Zeeb 
/* Toggle both protocol offloads (ARP/NS and GTK rekey) together.
 * Returns 0 on success or the first error encountered.
 */
static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
{
	int ret = ath12k_wow_arp_ns_offload(ar, enable);

	if (ret) {
		ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath12k_gtk_rekey_offload(ar, enable);
	if (ret)
		ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);

	return ret;
}
826*a9655020SBjoern A. Zeeb 
/* Apply the given keepalive method/interval to every vif on this
 * radio.  Stops at, and returns, the first failure.
 */
static int ath12k_wow_set_keepalive(struct ath12k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath12k_link_vif *arvif;
	int ret = 0;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			break;
	}

	return ret;
}
844*a9655020SBjoern A. Zeeb 
/* mac80211 ->suspend() handler: arm the firmware for Wake-on-WLAN.
 *
 * The sequence is order-sensitive: stale WoW state is cleared first,
 * then wakeup events and protocol offloads (ARP/NS, GTK rekey) are
 * programmed, pending tx is drained, the hw data filter and keepalive
 * are configured, and only then is WoW enabled and the host interrupt
 * path quiesced before suspending the HIF.
 *
 * Returns 0 on success and 1 on any failure, per mac80211's suspend
 * contract (a non-zero return forces a full hw restart instead of a
 * later resume).
 */
int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	/* WoW is driven through the first radio of this hw */
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret =  ath12k_wow_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_protocol_offload(ar, true);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	/* make sure nothing is still in flight before hw filtering kicks in */
	ret = ath12k_mac_wait_tx_complete(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_enable(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	/* firmware is in WoW now; quiesce the host interrupt path */
	ath12k_hif_irq_disable(ar->ab);
	ath12k_hif_ce_irq_disable(ar->ab);

	ret = ath12k_hif_suspend(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	/* WoW was already enabled; wake the firmware back up before
	 * unwinding the rest of the configuration
	 */
	ath12k_wow_wakeup(ar);

cleanup:
	ath12k_wow_cleanup(ar);

exit:
	return ret ? 1 : 0;
}
922*a9655020SBjoern A. Zeeb 
/* mac80211 ->set_wakeup() handler: propagate the wakeup enable flag
 * to the underlying device (first radio of this hw).
 */
void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath12k *ar = ath12k_ah_to_ar(ath12k_hw_to_ah(hw), 0);

	lockdep_assert_wiphy(hw->wiphy);

	device_set_wakeup_enable(ar->ab->dev, enabled);
}
932*a9655020SBjoern A. Zeeb 
/* mac80211 ->resume() handler: undo the WoW configuration applied in
 * ath12k_wow_op_suspend().
 *
 * The HIF is resumed and interrupts re-enabled before waking the
 * firmware out of WoW; then NLO state, the hw data filter, the
 * protocol offloads and keepalive are torn down in turn.
 *
 * On any failure the outcome depends on the hw state: if the hw was
 * ON, return 1 so mac80211 performs a full restart; any other state
 * cannot be recovered and is reported as -EIO.
 */
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	/* WoW is driven through the first radio of this hw */
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_hif_resume(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath12k_hif_ce_irq_enable(ar->ab);
	ath12k_hif_irq_enable(ar->ab);

	ret = ath12k_wow_wakeup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_nlo_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_clear_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_protocol_offload(ar, false);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		/* map the failure onto a mac80211-visible result based on
		 * the current hw state; no default case so a new state
		 * triggers a compiler warning here
		 */
		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH12K_HW_STATE_OFF:
		case ATH12K_HW_STATE_RESTARTING:
		case ATH12K_HW_STATE_RESTARTED:
		case ATH12K_HW_STATE_WEDGED:
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ah->state);
			ret = -EIO;
			break;
		}
	}

	return ret;
}
1004*a9655020SBjoern A. Zeeb 
ath12k_wow_init(struct ath12k * ar)1005*a9655020SBjoern A. Zeeb int ath12k_wow_init(struct ath12k *ar)
1006*a9655020SBjoern A. Zeeb {
1007*a9655020SBjoern A. Zeeb 	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
1008*a9655020SBjoern A. Zeeb 		return 0;
1009*a9655020SBjoern A. Zeeb 
1010*a9655020SBjoern A. Zeeb 	ar->wow.wowlan_support = ath12k_wowlan_support;
1011*a9655020SBjoern A. Zeeb 
1012*a9655020SBjoern A. Zeeb 	if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
1013*a9655020SBjoern A. Zeeb 		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
1014*a9655020SBjoern A. Zeeb 		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
1015*a9655020SBjoern A. Zeeb 	}
1016*a9655020SBjoern A. Zeeb 
1017*a9655020SBjoern A. Zeeb 	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
1018*a9655020SBjoern A. Zeeb 		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
1019*a9655020SBjoern A. Zeeb 		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
1020*a9655020SBjoern A. Zeeb 	}
1021*a9655020SBjoern A. Zeeb 
1022*a9655020SBjoern A. Zeeb 	ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
1023*a9655020SBjoern A. Zeeb 	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
1024*a9655020SBjoern A. Zeeb 	ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;
1025*a9655020SBjoern A. Zeeb 
1026*a9655020SBjoern A. Zeeb 	device_set_wakeup_capable(ar->ab->dev, true);
1027*a9655020SBjoern A. Zeeb 
1028*a9655020SBjoern A. Zeeb 	return 0;
1029*a9655020SBjoern A. Zeeb }
1030