// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel offload
 *
 *  Copyright (C) 2020-2025 OpenVPN, Inc.
 *
 *  Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/hashtable.h>
#include <net/ip6_route.h>

#include "ovpnpriv.h"
#include "bind.h"
#include "pktid.h"
#include "crypto.h"
#include "io.h"
#include "main.h"
#include "netlink.h"
#include "peer.h"
#include "socket.h"

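/* Peers removed while ovpn->lock is held are queued on a release list and
 * finalized here only after the lock has been dropped: their socket is
 * released and the list reference is put, keeping that (possibly sleeping)
 * teardown work out of the atomic section.
 */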
static void unlock_ovpn(struct ovpn_priv *ovpn,
			struct llist_head *release_list)
	__releases(&ovpn->lock)
{
	struct ovpn_peer *peer;

	spin_unlock_bh(&ovpn->lock);

	llist_for_each_entry(peer, release_list->first, release_entry) {
		ovpn_socket_release(peer);
		ovpn_peer_put(peer);
	}
}

/**
 * ovpn_peer_keepalive_set - configure keepalive values for peer
 * @peer: the peer to configure
 * @interval: outgoing keepalive interval
 * @timeout: incoming keepalive timeout
 */
void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
{
	time64_t now = ktime_get_real_seconds();

	netdev_dbg(peer->ovpn->dev,
		   "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
		   peer->id, interval, timeout);

	peer->keepalive_interval = interval;
	WRITE_ONCE(peer->last_sent, now);
	peer->keepalive_xmit_exp = now + interval;

	peer->keepalive_timeout = timeout;
	WRITE_ONCE(peer->last_recv, now);
	peer->keepalive_recv_exp = now + timeout;

	/* now that interval and timeout have been changed, kick
	 * off the worker so that the next delay can be recomputed
	 */
	mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
}

/**
 * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
 * @work: pointer to the work member of the related peer object
 *
 * NOTE: the reference to peer is not dropped because it gets inherited
 * by ovpn_xmit_special()
 */
static void ovpn_peer_keepalive_send(struct work_struct *work)
{
	struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
					      keepalive_work);

	local_bh_disable();
	ovpn_xmit_special(peer, ovpn_keepalive_message,
			  sizeof(ovpn_keepalive_message));
	local_bh_enable();
}

/**
 * ovpn_peer_new - allocate and initialize a new peer object
 * @ovpn: the openvpn instance inside which the peer should be created
 * @id: the ID assigned to this peer
 *
 * Return: a pointer to the new peer on success or an error code otherwise
 */
struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
{
	struct ovpn_peer *peer;
	int ret;

	/* alloc and init peer object */
	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (!peer)
		return ERR_PTR(-ENOMEM);

	peer->id = id;
	peer->ovpn = ovpn;

	peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
	peer->vpn_addrs.ipv6 = in6addr_any;

	RCU_INIT_POINTER(peer->bind, NULL);
	ovpn_crypto_state_init(&peer->crypto);
	spin_lock_init(&peer->lock);
	kref_init(&peer->refcount);
	ovpn_peer_stats_init(&peer->vpn_stats);
	ovpn_peer_stats_init(&peer->link_stats);
	INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);

	ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(ovpn->dev,
			   "cannot initialize dst cache for peer %u\n",
			   peer->id);
		kfree(peer);
		return ERR_PTR(ret);
	}

	netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);

	return peer;
}
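/* Illustrative lifecycle (sketch, see the netlink handlers for the actual
 * callers): a peer returned by ovpn_peer_new() is linked to its instance
 * with ovpn_peer_add() and later detached with ovpn_peer_del(); any user
 * holding a reference drops it with ovpn_peer_put() when done.
 */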

/**
 * ovpn_peer_reset_sockaddr - recreate binding for peer
 * @peer: peer to recreate the binding for
 * @ss: sockaddr to use as remote endpoint for the binding
 * @local_ip: local IP for the binding
 *
 * Return: 0 on success or a negative error code otherwise
 */
int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
			     const struct sockaddr_storage *ss,
			     const void *local_ip)
{
	struct ovpn_bind *bind;
	size_t ip_len;

	lockdep_assert_held(&peer->lock);

	/* create new ovpn_bind object */
	bind = ovpn_bind_from_sockaddr(ss);
	if (IS_ERR(bind))
		return PTR_ERR(bind);

	if (local_ip) {
		if (ss->ss_family == AF_INET) {
			ip_len = sizeof(struct in_addr);
		} else if (ss->ss_family == AF_INET6) {
			ip_len = sizeof(struct in6_addr);
		} else {
			net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    ss->ss_family, peer->id);
			kfree(bind);
			return -EINVAL;
		}

		memcpy(&bind->local, local_ip, ip_len);
	}

	/* set binding */
	ovpn_bind_reset(peer, bind);

	return 0;
}

/* variable name __tbl2 needs to be different from __tbl1
 * in the macro below to avoid confusing clang
 */
#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({	\
	typeof(_tbl) *__tbl2 = &(_tbl);			\
	jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2);	\
})

#define ovpn_get_hash_head(_tbl, _key, _key_len) ({	\
	typeof(_tbl) *__tbl1 = &(_tbl);			\
	&(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
})
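/* Example usage (matching the lookups below): the bucket for a peer ID is
 * obtained with
 *	head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
 *				  sizeof(peer_id));
 * while ovpn_get_hash_slot() returns only the bucket index, which the
 * nulls-based lookups compare against get_nulls_value() to detect entries
 * that moved to another chain during a lockless traversal.
 */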

/**
 * ovpn_peer_endpoints_update - update remote or local endpoint for peer
 * @peer: peer to update the remote endpoint for
 * @skb: incoming packet to retrieve the source/destination address from
 */
void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct hlist_nulls_head *nhead;
	struct sockaddr_storage ss;
	struct sockaddr_in6 *sa6;
	bool reset_cache = false;
	struct sockaddr_in *sa;
	struct ovpn_bind *bind;
	const void *local_ip;
	size_t salen = 0;

	spin_lock_bh(&peer->lock);
	bind = rcu_dereference_protected(peer->bind,
					 lockdep_is_held(&peer->lock));
	if (unlikely(!bind))
		goto unlock;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		/* float check */
		if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
			/* unconditionally save local endpoint in case
			 * of float, as it may have changed as well
			 */
			local_ip = &ip_hdr(skb)->daddr;
			sa = (struct sockaddr_in *)&ss;
			sa->sin_family = AF_INET;
			sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
			sa->sin_port = udp_hdr(skb)->source;
			salen = sizeof(*sa);
			reset_cache = true;
			break;
		}

		/* if no float happened, let's double check if the local endpoint
		 * has changed
		 */
		if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
			net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &bind->local.ipv4.s_addr,
					    &ip_hdr(skb)->daddr);
			bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
			reset_cache = true;
		}
		break;
	case htons(ETH_P_IPV6):
		/* float check */
		if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
			/* unconditionally save local endpoint in case
			 * of float, as it may have changed as well
			 */
			local_ip = &ipv6_hdr(skb)->daddr;
			sa6 = (struct sockaddr_in6 *)&ss;
			sa6->sin6_family = AF_INET6;
			sa6->sin6_addr = ipv6_hdr(skb)->saddr;
			sa6->sin6_port = udp_hdr(skb)->source;
			sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
								 skb->skb_iif);
			salen = sizeof(*sa6);
			reset_cache = true;
			break;
		}

		/* if no float happened, let's double check if the local endpoint
		 * has changed
		 */
		if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
					      &ipv6_hdr(skb)->daddr))) {
			net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &bind->local.ipv6,
					    &ipv6_hdr(skb)->daddr);
			bind->local.ipv6 = ipv6_hdr(skb)->daddr;
			reset_cache = true;
		}
		break;
	default:
		goto unlock;
	}

	if (unlikely(reset_cache))
		dst_cache_reset(&peer->dst_cache);

	/* if the peer did not float, we can bail out now */
	if (likely(!salen))
		goto unlock;

	if (unlikely(ovpn_peer_reset_sockaddr(peer,
					      (struct sockaddr_storage *)&ss,
					      local_ip) < 0))
		goto unlock;

	net_dbg_ratelimited("%s: peer %d floated to %pIScp\n",
			    netdev_name(peer->ovpn->dev), peer->id, &ss);

	spin_unlock_bh(&peer->lock);

	/* rehashing is required only in MP mode as P2P has one peer
	 * only and thus there is no hashtable
	 */
	if (peer->ovpn->mode == OVPN_MODE_MP) {
		spin_lock_bh(&peer->ovpn->lock);
		spin_lock_bh(&peer->lock);
		bind = rcu_dereference_protected(peer->bind,
						 lockdep_is_held(&peer->lock));
		if (unlikely(!bind)) {
			spin_unlock_bh(&peer->lock);
			spin_unlock_bh(&peer->ovpn->lock);
			return;
		}

		/* This function may be invoked concurrently, therefore another
		 * float may have happened in parallel: perform rehashing
		 * using the peer->bind->remote directly as key
		 */

		switch (bind->remote.in4.sin_family) {
		case AF_INET:
			salen = sizeof(*sa);
			break;
		case AF_INET6:
			salen = sizeof(*sa6);
			break;
		}

		/* remove old hashing */
		hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
		/* re-add with new transport address */
		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
					   &bind->remote, salen);
		hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
		spin_unlock_bh(&peer->lock);
		spin_unlock_bh(&peer->ovpn->lock);
	}
	return;
unlock:
	spin_unlock_bh(&peer->lock);
}

/**
 * ovpn_peer_release_rcu - RCU callback performing last peer release steps
 * @head: RCU member of the ovpn_peer
 */
static void ovpn_peer_release_rcu(struct rcu_head *head)
{
	struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);

	/* this call will immediately free the dst_cache, therefore we
	 * perform it in the RCU callback, when all contexts are done
	 */
	dst_cache_destroy(&peer->dst_cache);
	kfree(peer);
}

/**
 * ovpn_peer_release - release peer private members
 * @peer: the peer to release
 */
void ovpn_peer_release(struct ovpn_peer *peer)
{
	ovpn_crypto_state_release(&peer->crypto);
	spin_lock_bh(&peer->lock);
	ovpn_bind_reset(peer, NULL);
	spin_unlock_bh(&peer->lock);
	call_rcu(&peer->rcu, ovpn_peer_release_rcu);
	netdev_put(peer->ovpn->dev, &peer->dev_tracker);
}

/**
 * ovpn_peer_release_kref - callback for kref_put
 * @kref: the kref object belonging to the peer
 */
void ovpn_peer_release_kref(struct kref *kref)
{
	struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);

	ovpn_peer_release(peer);
}

/**
 * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
 * @skb: the packet to extract data from
 * @ss: the sockaddr to fill
 *
 * Return: sockaddr length on success or -1 otherwise
 */
static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
				     struct sockaddr_storage *ss)
{
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sa4 = (struct sockaddr_in *)ss;
		sa4->sin_family = AF_INET;
		sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sa4->sin_port = udp_hdr(skb)->source;
		return sizeof(*sa4);
	case htons(ETH_P_IPV6):
		sa6 = (struct sockaddr_in6 *)ss;
		sa6->sin6_family = AF_INET6;
		sa6->sin6_addr = ipv6_hdr(skb)->saddr;
		sa6->sin6_port = udp_hdr(skb)->source;
		return sizeof(*sa6);
	}

	return -1;
}

/**
 * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
 * @skb: the outgoing packet
 *
 * Return: the IPv4 of the nexthop
 */
static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
{
	const struct rtable *rt = skb_rtable(skb);

	if (rt && rt->rt_uses_gateway)
		return rt->rt_gw4;

	return ip_hdr(skb)->daddr;
}

/**
 * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
 * @skb: the outgoing packet
 *
 * Return: the IPv6 of the nexthop
 */
static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
{
	const struct rt6_info *rt = skb_rt6_info(skb);

	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return ipv6_hdr(skb)->daddr;

	return rt->rt6i_gateway;
}

/**
 * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
 * @ovpn: the openvpn instance to search
 * @addr: VPN IPv4 to use as search key
 *
 * Refcounter is not increased for the returned peer.
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
						    __be32 addr)
{
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	struct ovpn_peer *tmp;
	unsigned int slot;

begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
				  sizeof(addr));
	nhead = &ovpn->peers->by_vpn_addr4[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
		if (addr == tmp->vpn_addrs.ipv4.s_addr)
			return tmp;

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (get_nulls_value(ntmp) != slot)
		goto begin;

	return NULL;
}

/**
 * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
 * @ovpn: the openvpn instance to search
 * @addr: VPN IPv6 to use as search key
 *
 * Refcounter is not increased for the returned peer.
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
						    struct in6_addr *addr)
{
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	struct ovpn_peer *tmp;
	unsigned int slot;

begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
				  sizeof(*addr));
	nhead = &ovpn->peers->by_vpn_addr6[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
		if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
			return tmp;

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (get_nulls_value(ntmp) != slot)
		goto begin;

	return NULL;
}

/**
 * ovpn_peer_transp_match - check if sockaddr and peer binding match
 * @peer: the peer to get the binding from
 * @ss: the sockaddr to match
 *
 * Return: true if sockaddr and binding match or false otherwise
 */
static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
				   const struct sockaddr_storage *ss)
{
	struct ovpn_bind *bind = rcu_dereference(peer->bind);
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;

	if (unlikely(!bind))
		return false;

	if (ss->ss_family != bind->remote.in4.sin_family)
		return false;

	switch (ss->ss_family) {
	case AF_INET:
		sa4 = (struct sockaddr_in *)ss;
		if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
			return false;
		if (sa4->sin_port != bind->remote.in4.sin_port)
			return false;
		break;
	case AF_INET6:
		sa6 = (struct sockaddr_in6 *)ss;
		if (!ipv6_addr_equal(&sa6->sin6_addr,
				     &bind->remote.in6.sin6_addr))
			return false;
		if (sa6->sin6_port != bind->remote.in6.sin6_port)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

/**
 * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
 *                                    instance
 * @ovpn: the openvpn instance to search
 * @ss: the transport socket address
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *
ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
				 struct sockaddr_storage *ss)
{
	struct ovpn_peer *tmp, *peer = NULL;

	rcu_read_lock();
	tmp = rcu_dereference(ovpn->peer);
	if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
		   ovpn_peer_hold(tmp)))
		peer = tmp;
	rcu_read_unlock();

	return peer;
}

/**
 * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
 * @ovpn: the openvpn instance to search
 * @skb: the skb to retrieve the source transport address from
 *
 * Return: a pointer to the peer if found or NULL otherwise
 */
struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
					       struct sk_buff *skb)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct sockaddr_storage ss = { 0 };
	struct hlist_nulls_head *nhead;
	struct hlist_nulls_node *ntmp;
	unsigned int slot;
	ssize_t sa_len;

	sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
	if (unlikely(sa_len < 0))
		return NULL;

	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);

	rcu_read_lock();
begin:
	slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
	nhead = &ovpn->peers->by_transp_addr[slot];

	hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
				       hash_entry_transp_addr) {
		if (!ovpn_peer_transp_match(tmp, &ss))
			continue;

		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}

	/* item may have moved during lookup - check nulls and restart
	 * if that's the case
	 */
	if (!peer && get_nulls_value(ntmp) != slot)
		goto begin;
	rcu_read_unlock();

	return peer;
}

/**
 * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
 * @ovpn: the openvpn instance to search
 * @peer_id: the ID of the peer to find
 *
 * Return: the peer if found or NULL otherwise
 */
static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
						 u32 peer_id)
{
	struct ovpn_peer *tmp, *peer = NULL;

	rcu_read_lock();
	tmp = rcu_dereference(ovpn->peer);
	if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
		peer = tmp;
	rcu_read_unlock();

	return peer;
}

/**
 * ovpn_peer_get_by_id - retrieve peer by ID
 * @ovpn: the openvpn instance to search
 * @peer_id: the unique peer identifier to match
 *
 * Return: a pointer to the peer if found or NULL otherwise
 */
struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct hlist_head *head;

	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_get_by_id_p2p(ovpn, peer_id);

	head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
				  sizeof(peer_id));

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
		if (tmp->id != peer_id)
			continue;

		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}
	rcu_read_unlock();

	return peer;
}

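/* Unlink @peer from the instance (hashtables in MP mode, the single peer
 * pointer in P2P mode), notify userspace about the deletion and queue the
 * peer on @release_list: the socket release and the final reference drop
 * are performed later by unlock_ovpn(), once ovpn->lock has been dropped.
 */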
static void ovpn_peer_remove(struct ovpn_peer *peer,
			     enum ovpn_del_peer_reason reason,
			     struct llist_head *release_list)
{
	lockdep_assert_held(&peer->ovpn->lock);

	switch (peer->ovpn->mode) {
	case OVPN_MODE_MP:
		/* prevent double remove */
		if (hlist_unhashed(&peer->hash_entry_id))
			return;

		hlist_del_init_rcu(&peer->hash_entry_id);
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
		hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
		break;
	case OVPN_MODE_P2P:
		/* prevent double remove */
		if (peer != rcu_access_pointer(peer->ovpn->peer))
			return;

		RCU_INIT_POINTER(peer->ovpn->peer, NULL);
		/* in P2P mode the carrier is switched off when the peer is
		 * deleted so that third party protocols can react accordingly
		 */
		netif_carrier_off(peer->ovpn->dev);
		break;
	}

	peer->delete_reason = reason;
	ovpn_nl_peer_del_notify(peer);

	/* append to provided list for later socket release and ref drop */
	llist_add(&peer->release_entry, release_list);
}

/**
 * ovpn_peer_get_by_dst - Lookup peer to send skb to
 * @ovpn: the private data representing the current VPN session
 * @skb: the skb to extract the destination address from
 *
 * This function takes a tunnel packet and looks up the peer to send it to
 * after encapsulation. The skb is expected to be the in-tunnel packet, without
 * any OpenVPN related header.
 *
 * Assume that the IP header is accessible in the skb data.
 *
 * Return: the peer if found or NULL otherwise.
 */
struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
				       struct sk_buff *skb)
{
	struct ovpn_peer *peer = NULL;
	struct in6_addr addr6;
	__be32 addr4;

	/* in P2P mode, no matter the destination, packets are always sent to
	 * the single peer listening on the other side
	 */
	if (ovpn->mode == OVPN_MODE_P2P) {
		rcu_read_lock();
		peer = rcu_dereference(ovpn->peer);
		if (unlikely(peer && !ovpn_peer_hold(peer)))
			peer = NULL;
		rcu_read_unlock();
		return peer;
	}

	rcu_read_lock();
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		addr4 = ovpn_nexthop_from_skb4(skb);
		peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
		break;
	case htons(ETH_P_IPV6):
		addr6 = ovpn_nexthop_from_skb6(skb);
		peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
		break;
	}

	if (unlikely(peer && !ovpn_peer_hold(peer)))
		peer = NULL;
	rcu_read_unlock();

	return peer;
}

/**
 * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
 * @ovpn: the private data representing the current VPN session
 * @dest: the destination to be looked up
 *
 * Looks up in the IPv4 system routing table the IP of the nexthop to be used
 * to reach the destination passed as argument. If no nexthop can be found, the
 * destination itself is returned as it probably has to be used as nexthop.
 *
 * Return: the IP of the next hop if found or dest itself otherwise
 */
static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
{
	struct rtable *rt;
	struct flowi4 fl = {
		.daddr = dest
	};

	rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
	if (IS_ERR(rt)) {
		net_dbg_ratelimited("%s: no route to host %pI4\n",
				    netdev_name(ovpn->dev), &dest);
		/* if we end up here this packet is probably going to be
		 * thrown away later
		 */
		return dest;
	}

	if (!rt->rt_uses_gateway)
		goto out;

	dest = rt->rt_gw4;
out:
	ip_rt_put(rt);
	return dest;
}

/**
 * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
 * @ovpn: the private data representing the current VPN session
 * @dest: the destination to be looked up
 *
 * Looks up in the IPv6 system routing table the IP of the nexthop to be used
 * to reach the destination passed as argument. If no nexthop can be found, the
 * destination itself is returned as it probably has to be used as nexthop.
 *
 * Return: the IP of the next hop if found or dest itself otherwise
 */
static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
					     struct in6_addr dest)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct dst_entry *entry;
	struct rt6_info *rt;
	struct flowi6 fl = {
		.daddr = dest,
	};

	entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
						NULL);
	if (IS_ERR(entry)) {
		net_dbg_ratelimited("%s: no route to host %pI6c\n",
				    netdev_name(ovpn->dev), &dest);
		/* if we end up here this packet is probably going to be
		 * thrown away later
		 */
		return dest;
	}

	rt = dst_rt6_info(entry);

	if (!(rt->rt6i_flags & RTF_GATEWAY))
		goto out;

	dest = rt->rt6i_gateway;
out:
	dst_release((struct dst_entry *)rt);
#endif
	return dest;
}

/**
 * ovpn_peer_check_by_src - check that skb source is routed via peer
 * @ovpn: the openvpn instance to search
 * @skb: the packet to extract source address from
 * @peer: the peer to check against the source address
 *
 * Return: true if the peer is matching or false otherwise
 */
bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
			    struct ovpn_peer *peer)
{
	bool match = false;
	struct in6_addr addr6;
	__be32 addr4;

	if (ovpn->mode == OVPN_MODE_P2P) {
		/* in P2P mode, no matter the destination, packets are always
		 * sent to the single peer listening on the other side
		 */
		return peer == rcu_access_pointer(ovpn->peer);
	}

	/* This function performs a reverse path check, therefore we now
	 * lookup the nexthop we would use if we wanted to route a packet
	 * to the source IP. If the nexthop matches the sender we know the
	 * latter is valid and we allow the packet to come in
	 */

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
		rcu_read_lock();
		match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
		rcu_read_unlock();
		break;
	case htons(ETH_P_IPV6):
		addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
		rcu_read_lock();
		match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
		rcu_read_unlock();
		break;
	}

	return match;
}

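/* (Re)hash the peer in the VPN address lookup tables. This is meaningful
 * only in MP mode and only for addresses that are actually set: an
 * all-zero IPv4 or IPv6 VPN address is skipped. Callers must hold
 * ovpn->lock, as asserted below.
 */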
void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
{
	struct hlist_nulls_head *nhead;

	lockdep_assert_held(&peer->ovpn->lock);

	/* rehashing makes sense only in multipeer mode */
	if (peer->ovpn->mode != OVPN_MODE_MP)
		return;

	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
		/* remove potential old hashing */
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);

		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
					   &peer->vpn_addrs.ipv4,
					   sizeof(peer->vpn_addrs.ipv4));
		hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
	}

	if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
		/* remove potential old hashing */
		hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);

		nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
					   &peer->vpn_addrs.ipv6,
					   sizeof(peer->vpn_addrs.ipv6));
		hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
	}
}

/**
 * ovpn_peer_add_mp - add peer to related tables in a MP instance
 * @ovpn: the instance to add the peer to
 * @peer: the peer to add
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
{
	struct sockaddr_storage sa = { 0 };
	struct hlist_nulls_head *nhead;
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;
	struct ovpn_bind *bind;
	struct ovpn_peer *tmp;
	size_t salen;
	int ret = 0;

	spin_lock_bh(&ovpn->lock);
	/* do not add duplicates */
	tmp = ovpn_peer_get_by_id(ovpn, peer->id);
	if (tmp) {
		ovpn_peer_put(tmp);
		ret = -EEXIST;
		goto out;
	}

	bind = rcu_dereference_protected(peer->bind, true);
	/* peers connected via TCP have bind == NULL */
	if (bind) {
		switch (bind->remote.in4.sin_family) {
		case AF_INET:
			sa4 = (struct sockaddr_in *)&sa;

			sa4->sin_family = AF_INET;
			sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
			sa4->sin_port = bind->remote.in4.sin_port;
			salen = sizeof(*sa4);
			break;
		case AF_INET6:
			sa6 = (struct sockaddr_in6 *)&sa;

			sa6->sin6_family = AF_INET6;
			sa6->sin6_addr = bind->remote.in6.sin6_addr;
			sa6->sin6_port = bind->remote.in6.sin6_port;
			salen = sizeof(*sa6);
			break;
		default:
			ret = -EPROTONOSUPPORT;
			goto out;
		}

		nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
					   salen);
		hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
	}

	hlist_add_head_rcu(&peer->hash_entry_id,
			   ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
					      sizeof(peer->id)));

	ovpn_peer_hash_vpn_ip(peer);
out:
	spin_unlock_bh(&ovpn->lock);
	return ret;
}

/**
 * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
 * @ovpn: the instance to add the peer to
 * @peer: the peer to add
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
{
	LLIST_HEAD(release_list);
	struct ovpn_peer *tmp;

	spin_lock_bh(&ovpn->lock);
	/* in p2p mode it is possible to have a single peer only, therefore the
	 * old one is released and substituted by the new one
	 */
	tmp = rcu_dereference_protected(ovpn->peer,
					lockdep_is_held(&ovpn->lock));
	if (tmp)
		ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
				 &release_list);

	rcu_assign_pointer(ovpn->peer, peer);
	/* in P2P mode the carrier is switched on when the peer is added */
	netif_carrier_on(ovpn->dev);
	unlock_ovpn(ovpn, &release_list);

	return 0;
}

/**
 * ovpn_peer_add - add peer to the related tables
 * @ovpn: the openvpn instance the peer belongs to
 * @peer: the peer object to add
 *
 * Assume refcounter was increased by caller
 *
 * Return: 0 on success or a negative error code otherwise
 */
int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
{
	switch (ovpn->mode) {
	case OVPN_MODE_MP:
		return ovpn_peer_add_mp(ovpn, peer);
	case OVPN_MODE_P2P:
		return ovpn_peer_add_p2p(ovpn, peer);
	}

	return -EOPNOTSUPP;
}

/**
 * ovpn_peer_del_mp - delete peer from related tables in a MP instance
 * @peer: the peer to delete
 * @reason: reason why the peer was deleted (sent to userspace)
 * @release_list: list where the deleted peer should be appended
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_del_mp(struct ovpn_peer *peer,
			    enum ovpn_del_peer_reason reason,
			    struct llist_head *release_list)
{
	struct ovpn_peer *tmp;
	int ret = -ENOENT;

	lockdep_assert_held(&peer->ovpn->lock);

	tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
	if (tmp == peer) {
		ovpn_peer_remove(peer, reason, release_list);
		ret = 0;
	}

	if (tmp)
		ovpn_peer_put(tmp);

	return ret;
}

/**
 * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
 * @peer: the peer to delete
 * @reason: reason why the peer was deleted (sent to userspace)
 * @release_list: list where the deleted peer should be appended
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
			     enum ovpn_del_peer_reason reason,
			     struct llist_head *release_list)
{
	struct ovpn_peer *tmp;

	lockdep_assert_held(&peer->ovpn->lock);

	tmp = rcu_dereference_protected(peer->ovpn->peer,
					lockdep_is_held(&peer->ovpn->lock));
	if (tmp != peer)
		return -ENOENT;

	ovpn_peer_remove(peer, reason, release_list);

	return 0;
}

/**
 * ovpn_peer_del - delete peer from related tables
 * @peer: the peer object to delete
 * @reason: reason for deleting peer (will be sent to userspace)
 *
 * Return: 0 on success or a negative error code otherwise
 */
int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
{
	LLIST_HEAD(release_list);
	int ret = -EOPNOTSUPP;

	spin_lock_bh(&peer->ovpn->lock);
	switch (peer->ovpn->mode) {
	case OVPN_MODE_MP:
		ret = ovpn_peer_del_mp(peer, reason, &release_list);
		break;
	case OVPN_MODE_P2P:
		ret = ovpn_peer_del_p2p(peer, reason, &release_list);
		break;
	default:
		break;
	}
	unlock_ovpn(peer->ovpn, &release_list);

	return ret;
}

/**
 * ovpn_peer_release_p2p - release peer upon P2P device teardown
 * @ovpn: the instance being torn down
 * @sk: if not NULL, release peer only if it's using this specific socket
 * @reason: the reason for releasing the peer
 */
static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
				  enum ovpn_del_peer_reason reason)
{
	struct ovpn_socket *ovpn_sock;
	LLIST_HEAD(release_list);
	struct ovpn_peer *peer;

	spin_lock_bh(&ovpn->lock);
	peer = rcu_dereference_protected(ovpn->peer,
					 lockdep_is_held(&ovpn->lock));
	if (!peer) {
		spin_unlock_bh(&ovpn->lock);
		return;
	}

	if (sk) {
		ovpn_sock = rcu_access_pointer(peer->sock);
		if (!ovpn_sock || ovpn_sock->sk != sk) {
			spin_unlock_bh(&ovpn->lock);
			ovpn_peer_put(peer);
			return;
		}
	}

	ovpn_peer_remove(peer, reason, &release_list);
	unlock_ovpn(ovpn, &release_list);
}

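/* MP-mode counterpart of ovpn_peer_release_p2p(): walk the by_id hashtable
 * and remove every peer, or only the peers attached to @sk when a socket
 * is given, queueing them for release once ovpn->lock is dropped.
 */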
static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
				  enum ovpn_del_peer_reason reason)
{
	struct ovpn_socket *ovpn_sock;
	LLIST_HEAD(release_list);
	struct ovpn_peer *peer;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_bh(&ovpn->lock);
	hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
		bool remove = true;

		/* if a socket was passed as argument, skip all peers except
		 * those using it
		 */
		if (sk) {
			rcu_read_lock();
			ovpn_sock = rcu_dereference(peer->sock);
			remove = ovpn_sock && ovpn_sock->sk == sk;
			rcu_read_unlock();
		}

		if (remove)
			ovpn_peer_remove(peer, reason, &release_list);
	}
	unlock_ovpn(ovpn, &release_list);
}

/**
 * ovpn_peers_free - free all peers in the instance
 * @ovpn: the instance whose peers should be released
 * @sk: if not NULL, only peers using this socket are removed and the socket
 *      is released immediately
 * @reason: the reason for releasing all peers
 */
void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
		     enum ovpn_del_peer_reason reason)
{
	switch (ovpn->mode) {
	case OVPN_MODE_P2P:
		ovpn_peer_release_p2p(ovpn, sk, reason);
		break;
	case OVPN_MODE_MP:
		ovpn_peers_release_mp(ovpn, sk, reason);
		break;
	}
}

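/* Run the keepalive logic for a single peer and return the absolute time
 * (in seconds) at which the worker should look at this peer again, or 0
 * when keepalive is not configured or the peer just expired and has been
 * queued for removal.
 */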
static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
						time64_t now,
						struct llist_head *release_list)
{
	time64_t last_recv, last_sent, next_run1, next_run2;
	unsigned long timeout, interval;
	bool expired;

	spin_lock_bh(&peer->lock);
	/* we expect both timers to be configured at the same time,
	 * therefore bail out if either is not set
	 */
	if (!peer->keepalive_timeout || !peer->keepalive_interval) {
		spin_unlock_bh(&peer->lock);
		return 0;
	}

	/* check for peer timeout */
	expired = false;
	timeout = peer->keepalive_timeout;
	last_recv = READ_ONCE(peer->last_recv);
	if (now < last_recv + timeout) {
		peer->keepalive_recv_exp = last_recv + timeout;
		next_run1 = peer->keepalive_recv_exp;
	} else if (peer->keepalive_recv_exp > now) {
		next_run1 = peer->keepalive_recv_exp;
	} else {
		expired = true;
	}

	if (expired) {
		/* peer is dead -> kill it and move on */
		spin_unlock_bh(&peer->lock);
		netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
			   peer->id);
		ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
				 release_list);
		return 0;
	}

	/* check for peer keepalive */
	expired = false;
	interval = peer->keepalive_interval;
	last_sent = READ_ONCE(peer->last_sent);
	if (now < last_sent + interval) {
		peer->keepalive_xmit_exp = last_sent + interval;
		next_run2 = peer->keepalive_xmit_exp;
	} else if (peer->keepalive_xmit_exp > now) {
		next_run2 = peer->keepalive_xmit_exp;
	} else {
		expired = true;
		next_run2 = now + interval;
	}
	spin_unlock_bh(&peer->lock);

	if (expired) {
		/* a keepalive packet is required */
		netdev_dbg(peer->ovpn->dev,
			   "sending keepalive to peer %u\n",
			   peer->id);
		if (schedule_work(&peer->keepalive_work))
			ovpn_peer_hold(peer);
	}

	if (next_run1 < next_run2)
		return next_run1;

	return next_run2;
}

static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
					    time64_t now,
					    struct llist_head *release_list)
{
	time64_t tmp_next_run, next_run = 0;
	struct hlist_node *tmp;
	struct ovpn_peer *peer;
	int bkt;

	lockdep_assert_held(&ovpn->lock);

	hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
		tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
							       release_list);
		if (!tmp_next_run)
			continue;

		/* the next worker run will be scheduled based on the shortest
		 * required interval across all peers
		 */
		if (!next_run || tmp_next_run < next_run)
			next_run = tmp_next_run;
	}

	return next_run;
}

static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
					     time64_t now,
					     struct llist_head *release_list)
{
	struct ovpn_peer *peer;
	time64_t next_run = 0;

	lockdep_assert_held(&ovpn->lock);

	peer = rcu_dereference_protected(ovpn->peer,
					 lockdep_is_held(&ovpn->lock));
	if (peer)
		next_run = ovpn_peer_keepalive_work_single(peer, now,
							   release_list);

	return next_run;
}

/**
 * ovpn_peer_keepalive_work - run keepalive logic on each known peer
 * @work: pointer to the work member of the related ovpn object
 *
 * Each peer has two timers (if configured):
 * 1. peer timeout: when no data is received for a certain interval,
 *    the peer is considered dead and it gets killed.
 * 2. peer keepalive: when no data is sent to a certain peer for a
 *    certain interval, a special 'keepalive' packet is explicitly sent.
 *
 * This function iterates across the whole peer collection while
 * checking the timers described above.
 */
void ovpn_peer_keepalive_work(struct work_struct *work)
{
	struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
					      keepalive_work.work);
	time64_t next_run = 0, now = ktime_get_real_seconds();
	LLIST_HEAD(release_list);

	spin_lock_bh(&ovpn->lock);
	switch (ovpn->mode) {
	case OVPN_MODE_MP:
		next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
						       &release_list);
		break;
	case OVPN_MODE_P2P:
		next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
							&release_list);
		break;
	}

	/* prevent rearming if the interface is being destroyed */
	if (next_run > 0) {
		netdev_dbg(ovpn->dev,
			   "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
			   now, next_run, next_run - now);
		schedule_delayed_work(&ovpn->keepalive_work,
				      (next_run - now) * HZ);
	}
	unlock_ovpn(ovpn, &release_list);
}