xref: /linux/drivers/net/ovpn/peer.c (revision dfecb0c5af3b07ebfa84be63a7a21bfc9e29a872)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  OpenVPN data channel offload
3  *
4  *  Copyright (C) 2020-2025 OpenVPN, Inc.
5  *
6  *  Author:	James Yonan <james@openvpn.net>
7  *		Antonio Quartulli <antonio@openvpn.net>
8  */
9 
10 #include <linux/skbuff.h>
11 #include <linux/list.h>
12 #include <linux/hashtable.h>
13 #include <net/ip6_route.h>
14 
15 #include "ovpnpriv.h"
16 #include "bind.h"
17 #include "pktid.h"
18 #include "crypto.h"
19 #include "io.h"
20 #include "main.h"
21 #include "netlink.h"
22 #include "peer.h"
23 #include "socket.h"
24 
/**
 * unlock_ovpn - release the instance lock and finalize removed peers
 * @ovpn: the instance whose lock is being released
 * @release_list: llist of peers queued by ovpn_peer_remove() under the lock
 *
 * For every queued peer the socket is released and the reference that was
 * owned by the peer table/pointer is dropped; both steps are deliberately
 * performed after spin_unlock_bh() (NOTE(review): presumably because
 * ovpn_socket_release() cannot run under ovpn->lock — confirm).
 */
static void unlock_ovpn(struct ovpn_priv *ovpn,
			 struct llist_head *release_list)
	__releases(&ovpn->lock)
{
	struct ovpn_peer *peer;

	spin_unlock_bh(&ovpn->lock);

	/* walk the peers collected while the lock was held */
	llist_for_each_entry(peer, release_list->first, release_entry) {
		ovpn_socket_release(peer);
		ovpn_peer_put(peer);
	}
}
38 
/**
 * ovpn_peer_keepalive_set - configure keepalive values for peer
 * @peer: the peer to configure
 * @interval: outgoing keepalive interval (seconds)
 * @timeout: incoming keepalive timeout (seconds)
 *
 * Resets both keepalive deadlines relative to "now" and kicks the
 * instance-wide keepalive worker so the next wakeup is recomputed.
 */
void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
{
	time64_t now = ktime_get_real_seconds();

	netdev_dbg(peer->ovpn->dev,
		   "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
		   peer->id, interval, timeout);

	/* last_sent/last_recv are written with WRITE_ONCE because they are
	 * read without peer->lock elsewhere (lockless readers)
	 */
	peer->keepalive_interval = interval;
	WRITE_ONCE(peer->last_sent, now);
	peer->keepalive_xmit_exp = now + interval;

	peer->keepalive_timeout = timeout;
	WRITE_ONCE(peer->last_recv, now);
	peer->keepalive_recv_exp = now + timeout;

	/* now that interval and timeout have been changed, kick
	 * off the worker so that the next delay can be recomputed
	 */
	mod_delayed_work(system_percpu_wq, &peer->ovpn->keepalive_work, 0);
}
66 
/**
 * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
 * @work: pointer to the work member of the related peer object
 *
 * NOTE: the reference to peer is not dropped because it gets inherited
 * by ovpn_xmit_special()
 */
static void ovpn_peer_keepalive_send(struct work_struct *work)
{
	struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
					      keepalive_work);

	/* transmission path expects BH context */
	local_bh_disable();
	ovpn_xmit_special(peer, ovpn_keepalive_message,
			  sizeof(ovpn_keepalive_message));
	local_bh_enable();
}
84 
/**
 * ovpn_peer_new - allocate and initialize a new peer object
 * @ovpn: the openvpn instance inside which the peer should be created
 * @id: the ID assigned to this peer
 *
 * The caller owns the initial reference installed by kref_init().
 * A reference on @ovpn's netdev is taken so the device outlives the peer.
 *
 * Return: a pointer to the new peer on success or an error code otherwise
 */
struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
{
	struct ovpn_peer *peer;
	int ret;

	/* alloc and init peer object */
	peer = kzalloc_obj(*peer);
	if (!peer)
		return ERR_PTR(-ENOMEM);

	/* in the default case TX and RX IDs are the same.
	 * the user may set a different TX ID via netlink
	 */
	peer->id = id;
	peer->tx_id = id;
	peer->ovpn = ovpn;

	/* no VPN address learned yet: both families start as "any" */
	peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
	peer->vpn_addrs.ipv6 = in6addr_any;

	RCU_INIT_POINTER(peer->bind, NULL);
	ovpn_crypto_state_init(&peer->crypto);
	spin_lock_init(&peer->lock);
	kref_init(&peer->refcount);
	ovpn_peer_stats_init(&peer->vpn_stats);
	ovpn_peer_stats_init(&peer->link_stats);
	INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);

	ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(ovpn->dev,
			   "cannot initialize dst cache for peer %u\n",
			   peer->id);
		kfree(peer);
		return ERR_PTR(ret);
	}

	/* dropped in ovpn_peer_release() via dev_tracker */
	netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);

	return peer;
}
133 
/**
 * ovpn_peer_reset_sockaddr - recreate binding for peer
 * @peer: peer to recreate the binding for
 * @ss: sockaddr to use as remote endpoint for the binding
 * @local_ip: local IP for the binding; may be NULL, in which case the
 *            local endpoint of the new bind is left untouched
 *
 * Caller must hold peer->lock (asserted below). On success the new bind
 * is installed via ovpn_bind_reset(), which takes ownership of it.
 *
 * Return: 0 on success or a negative error code otherwise
 */
int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
			     const struct sockaddr_storage *ss,
			     const void *local_ip)
{
	struct ovpn_bind *bind;
	size_t ip_len;

	lockdep_assert_held(&peer->lock);

	/* create new ovpn_bind object */
	bind = ovpn_bind_from_sockaddr(ss);
	if (IS_ERR(bind))
		return PTR_ERR(bind);

	if (local_ip) {
		/* local_ip length is derived from the remote family: the
		 * two endpoints are expected to share the same family
		 */
		if (ss->ss_family == AF_INET) {
			ip_len = sizeof(struct in_addr);
		} else if (ss->ss_family == AF_INET6) {
			ip_len = sizeof(struct in6_addr);
		} else {
			net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    ss->ss_family, peer->id);
			/* bind was never installed: free it here */
			kfree(bind);
			return -EINVAL;
		}

		memcpy(&bind->local, local_ip, ip_len);
	}

	/* set binding */
	ovpn_bind_reset(peer, bind);

	return 0;
}
177 
/* variable name __tbl2 needs to be different from __tbl1
 * in the macro below to avoid confusing clang
 */
/* compute the slot index for a key of _key_len bytes in hashtable _tbl */
#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({	\
	typeof(_tbl) *__tbl2 = &(_tbl);			\
	jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2);	\
})

/* return a pointer to the bucket head for the given key in _tbl */
#define ovpn_get_hash_head(_tbl, _key, _key_len) ({		\
	typeof(_tbl) *__tbl1 = &(_tbl);				\
	&(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
})
190 
191 /**
192  * ovpn_peer_endpoints_update - update remote or local endpoint for peer
193  * @peer: peer to update the remote endpoint for
194  * @skb: incoming packet to retrieve the source/destination address from
195  */
196 void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
197 {
198 	struct hlist_nulls_head *nhead;
199 	struct sockaddr_storage ss;
200 	struct sockaddr_in6 *sa6;
201 	bool reset_cache = false;
202 	struct sockaddr_in *sa;
203 	struct ovpn_bind *bind;
204 	const void *local_ip;
205 	size_t salen = 0;
206 
207 	spin_lock_bh(&peer->lock);
208 	bind = rcu_dereference_protected(peer->bind,
209 					 lockdep_is_held(&peer->lock));
210 	if (unlikely(!bind))
211 		goto unlock;
212 
213 	switch (skb->protocol) {
214 	case htons(ETH_P_IP):
215 		/* float check */
216 		if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
217 			/* unconditionally save local endpoint in case
218 			 * of float, as it may have changed as well
219 			 */
220 			local_ip = &ip_hdr(skb)->daddr;
221 			sa = (struct sockaddr_in *)&ss;
222 			sa->sin_family = AF_INET;
223 			sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
224 			sa->sin_port = udp_hdr(skb)->source;
225 			salen = sizeof(*sa);
226 			reset_cache = true;
227 			break;
228 		}
229 
230 		/* if no float happened, let's double check if the local endpoint
231 		 * has changed
232 		 */
233 		if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
234 			net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
235 					    netdev_name(peer->ovpn->dev),
236 					    peer->id, &bind->local.ipv4.s_addr,
237 					    &ip_hdr(skb)->daddr);
238 			bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
239 			reset_cache = true;
240 		}
241 		break;
242 	case htons(ETH_P_IPV6):
243 		/* float check */
244 		if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
245 			/* unconditionally save local endpoint in case
246 			 * of float, as it may have changed as well
247 			 */
248 			local_ip = &ipv6_hdr(skb)->daddr;
249 			sa6 = (struct sockaddr_in6 *)&ss;
250 			sa6->sin6_family = AF_INET6;
251 			sa6->sin6_addr = ipv6_hdr(skb)->saddr;
252 			sa6->sin6_port = udp_hdr(skb)->source;
253 			sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
254 								 skb->skb_iif);
255 			salen = sizeof(*sa6);
256 			reset_cache = true;
257 			break;
258 		}
259 
260 		/* if no float happened, let's double check if the local endpoint
261 		 * has changed
262 		 */
263 		if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
264 					      &ipv6_hdr(skb)->daddr))) {
265 			net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
266 					    netdev_name(peer->ovpn->dev),
267 					    peer->id, &bind->local.ipv6,
268 					    &ipv6_hdr(skb)->daddr);
269 			bind->local.ipv6 = ipv6_hdr(skb)->daddr;
270 			reset_cache = true;
271 		}
272 		break;
273 	default:
274 		goto unlock;
275 	}
276 
277 	if (unlikely(reset_cache))
278 		dst_cache_reset(&peer->dst_cache);
279 
280 	/* if the peer did not float, we can bail out now */
281 	if (likely(!salen))
282 		goto unlock;
283 
284 	if (unlikely(ovpn_peer_reset_sockaddr(peer,
285 					      (struct sockaddr_storage *)&ss,
286 					      local_ip) < 0))
287 		goto unlock;
288 
289 	net_dbg_ratelimited("%s: peer %d floated to %pIScp",
290 			    netdev_name(peer->ovpn->dev), peer->id, &ss);
291 
292 	spin_unlock_bh(&peer->lock);
293 
294 	ovpn_nl_peer_float_notify(peer, &ss);
295 
296 	/* rehashing is required only in MP mode as P2P has one peer
297 	 * only and thus there is no hashtable
298 	 */
299 	if (peer->ovpn->mode == OVPN_MODE_MP) {
300 		spin_lock_bh(&peer->ovpn->lock);
301 		spin_lock_bh(&peer->lock);
302 		bind = rcu_dereference_protected(peer->bind,
303 						 lockdep_is_held(&peer->lock));
304 		if (unlikely(!bind)) {
305 			spin_unlock_bh(&peer->lock);
306 			spin_unlock_bh(&peer->ovpn->lock);
307 			return;
308 		}
309 
310 		/* This function may be invoked concurrently, therefore another
311 		 * float may have happened in parallel: perform rehashing
312 		 * using the peer->bind->remote directly as key
313 		 */
314 
315 		switch (bind->remote.in4.sin_family) {
316 		case AF_INET:
317 			salen = sizeof(*sa);
318 			break;
319 		case AF_INET6:
320 			salen = sizeof(*sa6);
321 			break;
322 		}
323 
324 		/* remove old hashing */
325 		hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
326 		/* re-add with new transport address */
327 		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
328 					   &bind->remote, salen);
329 		hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
330 		spin_unlock_bh(&peer->lock);
331 		spin_unlock_bh(&peer->ovpn->lock);
332 	}
333 	return;
334 unlock:
335 	spin_unlock_bh(&peer->lock);
336 }
337 
/**
 * ovpn_peer_release_rcu - RCU callback performing last peer release steps
 * @head: RCU member of the ovpn_peer
 */
static void ovpn_peer_release_rcu(struct rcu_head *head)
{
	struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);

	/* this call will immediately free the dst_cache, therefore we
	 * perform it in the RCU callback, when all contexts are done
	 */
	dst_cache_destroy(&peer->dst_cache);
	kfree(peer);
}
352 
/**
 * ovpn_peer_release - release peer private members
 * @peer: the peer to release
 *
 * Tears down crypto state and the bind, defers the final free to an RCU
 * callback (so concurrent RCU readers can finish) and drops the netdev
 * reference taken in ovpn_peer_new().
 */
void ovpn_peer_release(struct ovpn_peer *peer)
{
	ovpn_crypto_state_release(&peer->crypto);
	/* bind teardown requires peer->lock, see ovpn_peer_reset_sockaddr() */
	spin_lock_bh(&peer->lock);
	ovpn_bind_reset(peer, NULL);
	spin_unlock_bh(&peer->lock);
	call_rcu(&peer->rcu, ovpn_peer_release_rcu);
	netdev_put(peer->ovpn->dev, &peer->dev_tracker);
}
366 
/**
 * ovpn_peer_release_kref - callback for kref_put
 * @kref: the kref object belonging to the peer
 *
 * Invoked when the last reference to the peer is dropped.
 */
void ovpn_peer_release_kref(struct kref *kref)
{
	struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);

	ovpn_peer_release(peer);
}
377 
378 /**
379  * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
380  * @skb: the packet to extract data from
381  * @ss: the sockaddr to fill
382  *
383  * Return: sockaddr length on success or -1 otherwise
384  */
385 static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
386 				     struct sockaddr_storage *ss)
387 {
388 	struct sockaddr_in6 *sa6;
389 	struct sockaddr_in *sa4;
390 
391 	switch (skb->protocol) {
392 	case htons(ETH_P_IP):
393 		sa4 = (struct sockaddr_in *)ss;
394 		sa4->sin_family = AF_INET;
395 		sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
396 		sa4->sin_port = udp_hdr(skb)->source;
397 		return sizeof(*sa4);
398 	case htons(ETH_P_IPV6):
399 		sa6 = (struct sockaddr_in6 *)ss;
400 		sa6->sin6_family = AF_INET6;
401 		sa6->sin6_addr = ipv6_hdr(skb)->saddr;
402 		sa6->sin6_port = udp_hdr(skb)->source;
403 		return sizeof(*sa6);
404 	}
405 
406 	return -1;
407 }
408 
409 /**
410  * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
411  * @skb: the outgoing packet
412  *
413  * Return: the IPv4 of the nexthop
414  */
415 static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
416 {
417 	const struct rtable *rt = skb_rtable(skb);
418 
419 	if (rt && rt->rt_uses_gateway)
420 		return rt->rt_gw4;
421 
422 	return ip_hdr(skb)->daddr;
423 }
424 
425 /**
426  * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
427  * @skb: the outgoing packet
428  *
429  * Return: the IPv6 of the nexthop
430  */
431 static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
432 {
433 	const struct rt6_info *rt = skb_rt6_info(skb);
434 
435 	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
436 		return ipv6_hdr(skb)->daddr;
437 
438 	return rt->rt6i_gateway;
439 }
440 
/**
 * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
 * @ovpn: the openvpn instance to search
 * @addr: VPN IPv4 to use as search key
 *
 * Refcounter is not increased for the returned peer.
 * Caller is expected to be in an RCU read-side section (lockless
 * hlist_nulls traversal).
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
						    __be32 addr)
{
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	struct ovpn_peer *tmp;
	unsigned int slot;

begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
				  sizeof(addr));
	nhead = &ovpn->peers->by_vpn_addr4[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
		if (addr == tmp->vpn_addrs.ipv4.s_addr)
			return tmp;

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (get_nulls_value(ntmp) != slot)
		goto begin;

	return NULL;
}
475 
/**
 * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
 * @ovpn: the openvpn instance to search
 * @addr: VPN IPv6 to use as search key
 *
 * Refcounter is not increased for the returned peer.
 * Caller is expected to be in an RCU read-side section (lockless
 * hlist_nulls traversal).
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
						    struct in6_addr *addr)
{
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	struct ovpn_peer *tmp;
	unsigned int slot;

begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
				  sizeof(*addr));
	nhead = &ovpn->peers->by_vpn_addr6[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
		if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
			return tmp;

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (get_nulls_value(ntmp) != slot)
		goto begin;

	return NULL;
}
510 
511 /**
512  * ovpn_peer_transp_match - check if sockaddr and peer binding match
513  * @peer: the peer to get the binding from
514  * @ss: the sockaddr to match
515  *
516  * Return: true if sockaddr and binding match or false otherwise
517  */
518 static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
519 				   const struct sockaddr_storage *ss)
520 {
521 	struct ovpn_bind *bind = rcu_dereference(peer->bind);
522 	struct sockaddr_in6 *sa6;
523 	struct sockaddr_in *sa4;
524 
525 	if (unlikely(!bind))
526 		return false;
527 
528 	if (ss->ss_family != bind->remote.in4.sin_family)
529 		return false;
530 
531 	switch (ss->ss_family) {
532 	case AF_INET:
533 		sa4 = (struct sockaddr_in *)ss;
534 		if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
535 			return false;
536 		if (sa4->sin_port != bind->remote.in4.sin_port)
537 			return false;
538 		break;
539 	case AF_INET6:
540 		sa6 = (struct sockaddr_in6 *)ss;
541 		if (!ipv6_addr_equal(&sa6->sin6_addr,
542 				     &bind->remote.in6.sin6_addr))
543 			return false;
544 		if (sa6->sin6_port != bind->remote.in6.sin6_port)
545 			return false;
546 		break;
547 	default:
548 		return false;
549 	}
550 
551 	return true;
552 }
553 
/**
 * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
 *                                    instance
 * @ovpn: the openvpn instance to search
 * @ss: the transport socket address
 *
 * On success a reference on the returned peer is held via ovpn_peer_hold().
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *
ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
				 struct sockaddr_storage *ss)
{
	struct ovpn_peer *tmp, *peer = NULL;

	rcu_read_lock();
	tmp = rcu_dereference(ovpn->peer);
	/* only hand out the peer if the address matches and we managed to
	 * grab a reference (hold may fail if refcount already hit zero)
	 */
	if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
		   ovpn_peer_hold(tmp)))
		peer = tmp;
	rcu_read_unlock();

	return peer;
}
577 
/**
 * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
 * @ovpn: the openvpn instance to search
 * @skb: the skb to retrieve the source transport address from
 *
 * On success a reference on the returned peer is held.
 *
 * Return: a pointer to the peer if found or NULL otherwise
 */
struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
					       struct sk_buff *skb)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct sockaddr_storage ss = { 0 };
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	unsigned int slot;
	ssize_t sa_len;

	sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
	if (unlikely(sa_len < 0))
		return NULL;

	/* P2P mode has no hashtable: match against the single peer */
	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);

	rcu_read_lock();
begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
	nhead = &ovpn->peers->by_transp_addr[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
				       hash_entry_transp_addr) {
		if (!ovpn_peer_transp_match(tmp, &ss))
			continue;

		/* skip peers whose refcount already dropped to zero */
		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (!peer && get_nulls_value(ntmp) != slot)
		goto begin;
	rcu_read_unlock();

	return peer;
}
628 
629 /**
630  * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
631  * @ovpn: the openvpn instance to search
632  * @peer_id: the ID of the peer to find
633  *
634  * Return: the peer if found or NULL otherwise
635  */
636 static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
637 						 u32 peer_id)
638 {
639 	struct ovpn_peer *tmp, *peer = NULL;
640 
641 	rcu_read_lock();
642 	tmp = rcu_dereference(ovpn->peer);
643 	if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
644 		peer = tmp;
645 	rcu_read_unlock();
646 
647 	return peer;
648 }
649 
/**
 * ovpn_peer_get_by_id - retrieve peer by ID
 * @ovpn: the openvpn instance to search
 * @peer_id: the unique peer identifier to match
 *
 * On success a reference on the returned peer is held.
 *
 * Return: a pointer to the peer if found or NULL otherwise
 */
struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct hlist_head *head;

	/* P2P mode has a single peer pointer instead of a hashtable */
	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_get_by_id_p2p(ovpn, peer_id);

	head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
				  sizeof(peer_id));

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
		if (tmp->id != peer_id)
			continue;

		/* skip peers whose refcount already dropped to zero */
		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}
	rcu_read_unlock();

	return peer;
}
683 
/**
 * ovpn_peer_remove - unlink peer from the instance and queue it for release
 * @peer: the peer to remove
 * @reason: deletion reason forwarded to userspace
 * @release_list: llist the peer is appended to; the queued entry carries
 *                the reference that was owned by the table/pointer, later
 *                dropped by unlock_ovpn()
 *
 * Caller must hold ovpn->lock (asserted below). Safe against double
 * invocation thanks to the per-mode guards.
 */
static void ovpn_peer_remove(struct ovpn_peer *peer,
			     enum ovpn_del_peer_reason reason,
			     struct llist_head *release_list)
{
	lockdep_assert_held(&peer->ovpn->lock);

	switch (peer->ovpn->mode) {
	case OVPN_MODE_MP:
		/* prevent double remove */
		if (hlist_unhashed(&peer->hash_entry_id))
			return;

		/* drop the peer from all four lookup tables */
		hlist_del_init_rcu(&peer->hash_entry_id);
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
		hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
		break;
	case OVPN_MODE_P2P:
		/* prevent double remove */
		if (peer != rcu_access_pointer(peer->ovpn->peer))
			return;

		RCU_INIT_POINTER(peer->ovpn->peer, NULL);
		/* in P2P mode the carrier is switched off when the peer is
		 * deleted so that third party protocols can react accordingly
		 */
		netif_carrier_off(peer->ovpn->dev);
		break;
	}

	peer->delete_reason = reason;
	ovpn_nl_peer_del_notify(peer);

	/* append to provided list for later socket release and ref drop */
	llist_add(&peer->release_entry, release_list);
}
720 
/**
 * ovpn_peer_get_by_dst - Lookup peer to send skb to
 * @ovpn: the private data representing the current VPN session
 * @skb: the skb to extract the destination address from
 *
 * This function takes a tunnel packet and looks up the peer to send it to
 * after encapsulation. The skb is expected to be the in-tunnel packet, without
 * any OpenVPN related header.
 *
 * Assume that the IP header is accessible in the skb data.
 *
 * On success a reference on the returned peer is held.
 *
 * Return: the peer if found or NULL otherwise.
 */
struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
				       struct sk_buff *skb)
{
	struct ovpn_peer *peer = NULL;
	struct in6_addr addr6;
	__be32 addr4;

	/* in P2P mode, no matter the destination, packets are always sent to
	 * the single peer listening on the other side
	 */
	if (ovpn->mode == OVPN_MODE_P2P) {
		rcu_read_lock();
		peer = rcu_dereference(ovpn->peer);
		/* hold may fail if the refcount already dropped to zero */
		if (unlikely(peer && !ovpn_peer_hold(peer)))
			peer = NULL;
		rcu_read_unlock();
		return peer;
	}

	/* MP mode: route by nexthop derived from the packet destination */
	rcu_read_lock();
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		addr4 = ovpn_nexthop_from_skb4(skb);
		peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
		break;
	case htons(ETH_P_IPV6):
		addr6 = ovpn_nexthop_from_skb6(skb);
		peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
		break;
	}

	/* the addr lookups above do not take a reference: take it here,
	 * still inside the RCU section
	 */
	if (unlikely(peer && !ovpn_peer_hold(peer)))
		peer = NULL;
	rcu_read_unlock();

	return peer;
}
771 
772 /**
773  * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
774  * @ovpn: the private data representing the current VPN session
775  * @dest: the destination to be looked up
776  *
777  * Looks up in the IPv4 system routing table the IP of the nexthop to be used
778  * to reach the destination passed as argument. If no nexthop can be found, the
779  * destination itself is returned as it probably has to be used as nexthop.
780  *
781  * Return: the IP of the next hop if found or dest itself otherwise
782  */
783 static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
784 {
785 	struct rtable *rt;
786 	struct flowi4 fl = {
787 		.daddr = dest
788 	};
789 
790 	rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
791 	if (IS_ERR(rt)) {
792 		net_dbg_ratelimited("%s: no route to host %pI4\n",
793 				    netdev_name(ovpn->dev), &dest);
794 		/* if we end up here this packet is probably going to be
795 		 * thrown away later
796 		 */
797 		return dest;
798 	}
799 
800 	if (!rt->rt_uses_gateway)
801 		goto out;
802 
803 	dest = rt->rt_gw4;
804 out:
805 	ip_rt_put(rt);
806 	return dest;
807 }
808 
809 /**
810  * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
811  * @ovpn: the private data representing the current VPN session
812  * @dest: the destination to be looked up
813  *
814  * Looks up in the IPv6 system routing table the IP of the nexthop to be used
815  * to reach the destination passed as argument. If no nexthop can be found, the
816  * destination itself is returned as it probably has to be used as nexthop.
817  *
818  * Return: the IP of the next hop if found or dest itself otherwise
819  */
820 static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
821 					     struct in6_addr dest)
822 {
823 #if IS_ENABLED(CONFIG_IPV6)
824 	struct dst_entry *entry;
825 	struct rt6_info *rt;
826 	struct flowi6 fl = {
827 		.daddr = dest,
828 	};
829 
830 	entry = ip6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl, NULL);
831 	if (IS_ERR(entry)) {
832 		net_dbg_ratelimited("%s: no route to host %pI6c\n",
833 				    netdev_name(ovpn->dev), &dest);
834 		/* if we end up here this packet is probably going to be
835 		 * thrown away later
836 		 */
837 		return dest;
838 	}
839 
840 	rt = dst_rt6_info(entry);
841 
842 	if (!(rt->rt6i_flags & RTF_GATEWAY))
843 		goto out;
844 
845 	dest = rt->rt6i_gateway;
846 out:
847 	dst_release((struct dst_entry *)rt);
848 #endif
849 	return dest;
850 }
851 
/**
 * ovpn_peer_check_by_src - check that skb source is routed via peer
 * @ovpn: the openvpn instance to search
 * @skb: the packet to extract source address from
 * @peer: the peer to check against the source address
 *
 * Return: true if the peer is matching or false otherwise
 */
bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
			    struct ovpn_peer *peer)
{
	bool match = false;
	struct in6_addr addr6;
	__be32 addr4;

	if (ovpn->mode == OVPN_MODE_P2P) {
		/* in P2P mode, no matter the destination, packets are always
		 * sent to the single peer listening on the other side
		 */
		return peer == rcu_access_pointer(ovpn->peer);
	}

	/* This function performs a reverse path check, therefore we now
	 * lookup the nexthop we would use if we wanted to route a packet
	 * to the source IP. If the nexthop matches the sender we know the
	 * latter is valid and we allow the packet to come in
	 */

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
		/* lookup does not take a ref: pointer comparison only, done
		 * inside the RCU section
		 */
		rcu_read_lock();
		match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
		rcu_read_unlock();
		break;
	case htons(ETH_P_IPV6):
		addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
		rcu_read_lock();
		match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
		rcu_read_unlock();
		break;
	}

	return match;
}
897 
/**
 * ovpn_peer_hash_vpn_ip - (re)hash peer into the VPN-address lookup tables
 * @peer: the peer whose VPN addresses should be (re)hashed
 *
 * Caller must hold ovpn->lock (asserted below). Addresses left at their
 * "any" default are not hashed. A no-op in P2P mode, which has no tables.
 */
void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
{
	struct hlist_nulls_head *nhead;

	lockdep_assert_held(&peer->ovpn->lock);

	/* rehashing makes sense only in multipeer mode */
	if (peer->ovpn->mode != OVPN_MODE_MP)
		return;

	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
		/* remove potential old hashing */
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);

		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
					   &peer->vpn_addrs.ipv4,
					   sizeof(peer->vpn_addrs.ipv4));
		hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
	}

	if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
		/* remove potential old hashing */
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);

		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
					   &peer->vpn_addrs.ipv6,
					   sizeof(peer->vpn_addrs.ipv6));
		hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
	}
}
928 
/**
 * ovpn_peer_add_mp - add peer to related tables in a MP instance
 * @ovpn: the instance to add the peer to
 * @peer: the peer to add
 *
 * Hashes the peer by ID, by VPN address(es) and — when a bind exists —
 * by transport address, all under ovpn->lock.
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
{
	struct sockaddr_storage sa = { 0 };
	struct hlist_nulls_head *nhead;
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;
	struct ovpn_bind *bind;
	struct ovpn_peer *tmp;
	size_t salen;
	int ret = 0;

	spin_lock_bh(&ovpn->lock);
	/* do not add duplicates */
	tmp = ovpn_peer_get_by_id(ovpn, peer->id);
	if (tmp) {
		/* drop the reference taken by the lookup */
		ovpn_peer_put(tmp);
		ret = -EEXIST;
		goto out;
	}

	/* peer is not yet published, so its bind cannot change under us */
	bind = rcu_dereference_protected(peer->bind, true);
	/* peers connected via TCP have bind == NULL */
	if (bind) {
		/* rebuild a sockaddr from the bind to use as hash key */
		switch (bind->remote.in4.sin_family) {
		case AF_INET:
			sa4 = (struct sockaddr_in *)&sa;

			sa4->sin_family = AF_INET;
			sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
			sa4->sin_port = bind->remote.in4.sin_port;
			salen = sizeof(*sa4);
			break;
		case AF_INET6:
			sa6 = (struct sockaddr_in6 *)&sa;

			sa6->sin6_family = AF_INET6;
			sa6->sin6_addr = bind->remote.in6.sin6_addr;
			sa6->sin6_port = bind->remote.in6.sin6_port;
			salen = sizeof(*sa6);
			break;
		default:
			ret = -EPROTONOSUPPORT;
			goto out;
		}

		nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
					   salen);
		hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
	}

	hlist_add_head_rcu(&peer->hash_entry_id,
			   ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
					      sizeof(peer->id)));

	ovpn_peer_hash_vpn_ip(peer);
out:
	spin_unlock_bh(&ovpn->lock);
	return ret;
}
995 
/**
 * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
 * @ovpn: the instance to add the peer to
 * @peer: the peer to add
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
{
	LLIST_HEAD(release_list);
	struct ovpn_peer *tmp;

	spin_lock_bh(&ovpn->lock);
	/* in p2p mode it is possible to have a single peer only, therefore the
	 * old one is released and substituted by the new one
	 */
	tmp = rcu_dereference_protected(ovpn->peer,
					lockdep_is_held(&ovpn->lock));
	if (tmp)
		ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
				 &release_list);

	rcu_assign_pointer(ovpn->peer, peer);
	/* in P2P mode the carrier is switched on when the peer is added */
	netif_carrier_on(ovpn->dev);
	/* drops ovpn->lock and finalizes the old peer, if any */
	unlock_ovpn(ovpn, &release_list);

	return 0;
}
1025 
1026 /**
1027  * ovpn_peer_add - add peer to the related tables
1028  * @ovpn: the openvpn instance the peer belongs to
1029  * @peer: the peer object to add
1030  *
1031  * Assume refcounter was increased by caller
1032  *
1033  * Return: 0 on success or a negative error code otherwise
1034  */
1035 int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
1036 {
1037 	switch (ovpn->mode) {
1038 	case OVPN_MODE_MP:
1039 		return ovpn_peer_add_mp(ovpn, peer);
1040 	case OVPN_MODE_P2P:
1041 		return ovpn_peer_add_p2p(ovpn, peer);
1042 	}
1043 
1044 	return -EOPNOTSUPP;
1045 }
1046 
/**
 * ovpn_peer_del_mp - delete peer from related tables in a MP instance
 * @peer: the peer to delete
 * @reason: reason why the peer was deleted (sent to userspace)
 * @release_list: list where delete peer should be appended
 *
 * Caller must hold ovpn->lock (asserted below).
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_del_mp(struct ovpn_peer *peer,
			    enum ovpn_del_peer_reason reason,
			    struct llist_head *release_list)
{
	struct ovpn_peer *tmp;
	int ret = -ENOENT;

	lockdep_assert_held(&peer->ovpn->lock);

	/* only remove the peer if it is still the one hashed under its ID */
	tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
	if (tmp == peer) {
		ovpn_peer_remove(peer, reason, release_list);
		ret = 0;
	}

	/* drop the reference taken by the lookup above */
	if (tmp)
		ovpn_peer_put(tmp);

	return ret;
}
1075 
1076 /**
1077  * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
1078  * @peer: the peer to delete
1079  * @reason: reason why the peer was deleted (sent to userspace)
1080  * @release_list: list where delete peer should be appended
1081  *
1082  * Return: 0 on success or a negative error code otherwise
1083  */
1084 static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
1085 			     enum ovpn_del_peer_reason reason,
1086 			     struct llist_head *release_list)
1087 {
1088 	struct ovpn_peer *tmp;
1089 
1090 	lockdep_assert_held(&peer->ovpn->lock);
1091 
1092 	tmp = rcu_dereference_protected(peer->ovpn->peer,
1093 					lockdep_is_held(&peer->ovpn->lock));
1094 	if (tmp != peer)
1095 		return -ENOENT;
1096 
1097 	ovpn_peer_remove(peer, reason, release_list);
1098 
1099 	return 0;
1100 }
1101 
1102 /**
1103  * ovpn_peer_del - delete peer from related tables
1104  * @peer: the peer object to delete
1105  * @reason: reason for deleting peer (will be sent to userspace)
1106  *
1107  * Return: 0 on success or a negative error code otherwise
1108  */
1109 int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
1110 {
1111 	LLIST_HEAD(release_list);
1112 	int ret = -EOPNOTSUPP;
1113 
1114 	spin_lock_bh(&peer->ovpn->lock);
1115 	switch (peer->ovpn->mode) {
1116 	case OVPN_MODE_MP:
1117 		ret = ovpn_peer_del_mp(peer, reason, &release_list);
1118 		break;
1119 	case OVPN_MODE_P2P:
1120 		ret = ovpn_peer_del_p2p(peer, reason, &release_list);
1121 		break;
1122 	default:
1123 		break;
1124 	}
1125 	unlock_ovpn(peer->ovpn, &release_list);
1126 
1127 	return ret;
1128 }
1129 
/**
 * ovpn_peer_release_p2p - release peer upon P2P device teardown
 * @ovpn: the instance being torn down
 * @sk: if not NULL, release peer only if it's using this specific socket
 * @reason: the reason for releasing the peer
 */
static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
				  enum ovpn_del_peer_reason reason)
{
	struct ovpn_socket *ovpn_sock;
	LLIST_HEAD(release_list);
	struct ovpn_peer *peer;

	spin_lock_bh(&ovpn->lock);
	peer = rcu_dereference_protected(ovpn->peer,
					 lockdep_is_held(&ovpn->lock));
	if (!peer) {
		/* no peer installed, nothing to release */
		spin_unlock_bh(&ovpn->lock);
		return;
	}

	if (sk) {
		/* a socket filter was given: bail out unless the peer is
		 * bound to that very socket
		 */
		ovpn_sock = rcu_access_pointer(peer->sock);
		if (!ovpn_sock || ovpn_sock->sk != sk) {
			spin_unlock_bh(&ovpn->lock);
			/* NOTE(review): peer was obtained via
			 * rcu_dereference_protected() with no explicit hold
			 * in this function, yet a reference is dropped here —
			 * verify the refcount contract with the callers
			 */
			ovpn_peer_put(peer);
			return;
		}
	}

	/* unlink the peer; unlock_ovpn() releases its socket and refs
	 * after dropping the lock
	 */
	ovpn_peer_remove(peer, reason, &release_list);
	unlock_ovpn(ovpn, &release_list);
}
1163 
1164 static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
1165 				  enum ovpn_del_peer_reason reason)
1166 {
1167 	struct ovpn_socket *ovpn_sock;
1168 	LLIST_HEAD(release_list);
1169 	struct ovpn_peer *peer;
1170 	struct hlist_node *tmp;
1171 	int bkt;
1172 
1173 	spin_lock_bh(&ovpn->lock);
1174 	hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
1175 		bool remove = true;
1176 
1177 		/* if a socket was passed as argument, skip all peers except
1178 		 * those using it
1179 		 */
1180 		if (sk) {
1181 			rcu_read_lock();
1182 			ovpn_sock = rcu_dereference(peer->sock);
1183 			remove = ovpn_sock && ovpn_sock->sk == sk;
1184 			rcu_read_unlock();
1185 		}
1186 
1187 		if (remove)
1188 			ovpn_peer_remove(peer, reason, &release_list);
1189 	}
1190 	unlock_ovpn(ovpn, &release_list);
1191 }
1192 
1193 /**
1194  * ovpn_peers_free - free all peers in the instance
1195  * @ovpn: the instance whose peers should be released
1196  * @sk: if not NULL, only peers using this socket are removed and the socket
1197  *      is released immediately
1198  * @reason: the reason for releasing all peers
1199  */
1200 void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
1201 		     enum ovpn_del_peer_reason reason)
1202 {
1203 	switch (ovpn->mode) {
1204 	case OVPN_MODE_P2P:
1205 		ovpn_peer_release_p2p(ovpn, sk, reason);
1206 		break;
1207 	case OVPN_MODE_MP:
1208 		ovpn_peers_release_mp(ovpn, sk, reason);
1209 		break;
1210 	}
1211 }
1212 
/* run the keepalive logic on a single peer: kill it if the receive timeout
 * has elapsed, schedule a keepalive transmission if the send interval has
 * elapsed.
 *
 * Returns the next absolute time (in seconds) at which this peer must be
 * re-evaluated, or 0 when no rescheduling is needed (timers unconfigured or
 * peer expired).
 */
static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
						time64_t now,
						struct llist_head *release_list)
{
	time64_t last_recv, last_sent, next_run1, next_run2;
	unsigned long timeout, interval;
	bool expired;

	spin_lock_bh(&peer->lock);
	/* we expect both timers to be configured at the same time,
	 * therefore bail out if either is not set
	 */
	if (!peer->keepalive_timeout || !peer->keepalive_interval) {
		spin_unlock_bh(&peer->lock);
		return 0;
	}

	/* check for peer timeout */
	expired = false;
	timeout = peer->keepalive_timeout;
	/* last_recv is written with WRITE_ONCE elsewhere, hence READ_ONCE */
	last_recv = READ_ONCE(peer->last_recv);
	if (now < last_recv + timeout) {
		/* traffic seen recently enough: push the expiry forward */
		peer->keepalive_recv_exp = last_recv + timeout;
		next_run1 = peer->keepalive_recv_exp;
	} else if (peer->keepalive_recv_exp > now) {
		/* previously computed expiry still lies in the future */
		next_run1 = peer->keepalive_recv_exp;
	} else {
		expired = true;
	}

	if (expired) {
		/* peer is dead -> kill it and move on */
		spin_unlock_bh(&peer->lock);
		netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
			   peer->id);
		ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
				 release_list);
		return 0;
	}

	/* check for peer keepalive */
	expired = false;
	interval = peer->keepalive_interval;
	/* last_sent is written with WRITE_ONCE elsewhere, hence READ_ONCE */
	last_sent = READ_ONCE(peer->last_sent);
	if (now < last_sent + interval) {
		peer->keepalive_xmit_exp = last_sent + interval;
		next_run2 = peer->keepalive_xmit_exp;
	} else if (peer->keepalive_xmit_exp > now) {
		next_run2 = peer->keepalive_xmit_exp;
	} else {
		expired = true;
		next_run2 = now + interval;
	}
	spin_unlock_bh(&peer->lock);

	if (expired) {
		/* a keepalive packet is required */
		netdev_dbg(peer->ovpn->dev,
			   "sending keepalive to peer %u\n",
			   peer->id);
		/* hold a reference for the scheduled worker; NOTE(review):
		 * assumed the keepalive worker drops it when done — confirm
		 * in the worker implementation
		 */
		if (schedule_work(&peer->keepalive_work))
			ovpn_peer_hold(peer);
	}

	/* return the earlier of the two expiries; both next_run1 and
	 * next_run2 are initialized here, as the expired-timeout case
	 * returned earlier
	 */
	if (next_run1 < next_run2)
		return next_run1;

	return next_run2;
}
1282 
1283 static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
1284 					    time64_t now,
1285 					    struct llist_head *release_list)
1286 {
1287 	time64_t tmp_next_run, next_run = 0;
1288 	struct hlist_node *tmp;
1289 	struct ovpn_peer *peer;
1290 	int bkt;
1291 
1292 	lockdep_assert_held(&ovpn->lock);
1293 
1294 	hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
1295 		tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
1296 							       release_list);
1297 		if (!tmp_next_run)
1298 			continue;
1299 
1300 		/* the next worker run will be scheduled based on the shortest
1301 		 * required interval across all peers
1302 		 */
1303 		if (!next_run || tmp_next_run < next_run)
1304 			next_run = tmp_next_run;
1305 	}
1306 
1307 	return next_run;
1308 }
1309 
1310 static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
1311 					     time64_t now,
1312 					     struct llist_head *release_list)
1313 {
1314 	struct ovpn_peer *peer;
1315 	time64_t next_run = 0;
1316 
1317 	lockdep_assert_held(&ovpn->lock);
1318 
1319 	peer = rcu_dereference_protected(ovpn->peer,
1320 					 lockdep_is_held(&ovpn->lock));
1321 	if (peer)
1322 		next_run = ovpn_peer_keepalive_work_single(peer, now,
1323 							   release_list);
1324 
1325 	return next_run;
1326 }
1327 
1328 /**
1329  * ovpn_peer_keepalive_work - run keepalive logic on each known peer
1330  * @work: pointer to the work member of the related ovpn object
1331  *
1332  * Each peer has two timers (if configured):
1333  * 1. peer timeout: when no data is received for a certain interval,
1334  *    the peer is considered dead and it gets killed.
1335  * 2. peer keepalive: when no data is sent to a certain peer for a
1336  *    certain interval, a special 'keepalive' packet is explicitly sent.
1337  *
1338  * This function iterates across the whole peer collection while
1339  * checking the timers described above.
1340  */
1341 void ovpn_peer_keepalive_work(struct work_struct *work)
1342 {
1343 	struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
1344 					      keepalive_work.work);
1345 	time64_t next_run = 0, now = ktime_get_real_seconds();
1346 	LLIST_HEAD(release_list);
1347 
1348 	spin_lock_bh(&ovpn->lock);
1349 	switch (ovpn->mode) {
1350 	case OVPN_MODE_MP:
1351 		next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
1352 						       &release_list);
1353 		break;
1354 	case OVPN_MODE_P2P:
1355 		next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
1356 							&release_list);
1357 		break;
1358 	}
1359 
1360 	/* prevent rearming if the interface is being destroyed */
1361 	if (next_run > 0) {
1362 		netdev_dbg(ovpn->dev,
1363 			   "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
1364 			   next_run, now, next_run - now);
1365 		schedule_delayed_work(&ovpn->keepalive_work,
1366 				      (next_run - now) * HZ);
1367 	}
1368 	unlock_ovpn(ovpn, &release_list);
1369 }
1370