1 // SPDX-License-Identifier: GPL-2.0 2 /* OpenVPN data channel offload 3 * 4 * Copyright (C) 2020-2025 OpenVPN, Inc. 5 * 6 * Author: James Yonan <james@openvpn.net> 7 * Antonio Quartulli <antonio@openvpn.net> 8 */ 9 10 #include <linux/skbuff.h> 11 #include <linux/list.h> 12 #include <linux/hashtable.h> 13 #include <net/ip6_route.h> 14 15 #include "ovpnpriv.h" 16 #include "bind.h" 17 #include "pktid.h" 18 #include "crypto.h" 19 #include "io.h" 20 #include "main.h" 21 #include "netlink.h" 22 #include "peer.h" 23 #include "socket.h" 24 25 static void unlock_ovpn(struct ovpn_priv *ovpn, 26 struct llist_head *release_list) 27 __releases(&ovpn->lock) 28 { 29 struct ovpn_peer *peer; 30 31 spin_unlock_bh(&ovpn->lock); 32 33 llist_for_each_entry(peer, release_list->first, release_entry) { 34 ovpn_socket_release(peer); 35 ovpn_peer_put(peer); 36 } 37 } 38 39 /** 40 * ovpn_peer_keepalive_set - configure keepalive values for peer 41 * @peer: the peer to configure 42 * @interval: outgoing keepalive interval 43 * @timeout: incoming keepalive timeout 44 */ 45 void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout) 46 { 47 time64_t now = ktime_get_real_seconds(); 48 49 netdev_dbg(peer->ovpn->dev, 50 "scheduling keepalive for peer %u: interval=%u timeout=%u\n", 51 peer->id, interval, timeout); 52 53 peer->keepalive_interval = interval; 54 WRITE_ONCE(peer->last_sent, now); 55 peer->keepalive_xmit_exp = now + interval; 56 57 peer->keepalive_timeout = timeout; 58 WRITE_ONCE(peer->last_recv, now); 59 peer->keepalive_recv_exp = now + timeout; 60 61 /* now that interval and timeout have been changed, kick 62 * off the worker so that the next delay can be recomputed 63 */ 64 mod_delayed_work(system_percpu_wq, &peer->ovpn->keepalive_work, 0); 65 } 66 67 /** 68 * ovpn_peer_keepalive_send - periodic worker sending keepalive packets 69 * @work: pointer to the work member of the related peer object 70 * 71 * NOTE: the reference to peer is not dropped because it gets 
inherited 72 * by ovpn_xmit_special() 73 */ 74 static void ovpn_peer_keepalive_send(struct work_struct *work) 75 { 76 struct ovpn_peer *peer = container_of(work, struct ovpn_peer, 77 keepalive_work); 78 79 local_bh_disable(); 80 ovpn_xmit_special(peer, ovpn_keepalive_message, 81 sizeof(ovpn_keepalive_message)); 82 local_bh_enable(); 83 } 84 85 /** 86 * ovpn_peer_new - allocate and initialize a new peer object 87 * @ovpn: the openvpn instance inside which the peer should be created 88 * @id: the ID assigned to this peer 89 * 90 * Return: a pointer to the new peer on success or an error code otherwise 91 */ 92 struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id) 93 { 94 struct ovpn_peer *peer; 95 int ret; 96 97 /* alloc and init peer object */ 98 peer = kzalloc_obj(*peer); 99 if (!peer) 100 return ERR_PTR(-ENOMEM); 101 102 peer->id = id; 103 peer->ovpn = ovpn; 104 105 peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY); 106 peer->vpn_addrs.ipv6 = in6addr_any; 107 108 RCU_INIT_POINTER(peer->bind, NULL); 109 ovpn_crypto_state_init(&peer->crypto); 110 spin_lock_init(&peer->lock); 111 kref_init(&peer->refcount); 112 ovpn_peer_stats_init(&peer->vpn_stats); 113 ovpn_peer_stats_init(&peer->link_stats); 114 INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send); 115 116 ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL); 117 if (ret < 0) { 118 netdev_err(ovpn->dev, 119 "cannot initialize dst cache for peer %u\n", 120 peer->id); 121 kfree(peer); 122 return ERR_PTR(ret); 123 } 124 125 netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL); 126 127 return peer; 128 } 129 130 /** 131 * ovpn_peer_reset_sockaddr - recreate binding for peer 132 * @peer: peer to recreate the binding for 133 * @ss: sockaddr to use as remote endpoint for the binding 134 * @local_ip: local IP for the binding 135 * 136 * Return: 0 on success or a negative error code otherwise 137 */ 138 int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, 139 const struct sockaddr_storage *ss, 140 const void 
*local_ip) 141 { 142 struct ovpn_bind *bind; 143 size_t ip_len; 144 145 lockdep_assert_held(&peer->lock); 146 147 /* create new ovpn_bind object */ 148 bind = ovpn_bind_from_sockaddr(ss); 149 if (IS_ERR(bind)) 150 return PTR_ERR(bind); 151 152 if (local_ip) { 153 if (ss->ss_family == AF_INET) { 154 ip_len = sizeof(struct in_addr); 155 } else if (ss->ss_family == AF_INET6) { 156 ip_len = sizeof(struct in6_addr); 157 } else { 158 net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n", 159 netdev_name(peer->ovpn->dev), 160 ss->ss_family, peer->id); 161 kfree(bind); 162 return -EINVAL; 163 } 164 165 memcpy(&bind->local, local_ip, ip_len); 166 } 167 168 /* set binding */ 169 ovpn_bind_reset(peer, bind); 170 171 return 0; 172 } 173 174 /* variable name __tbl2 needs to be different from __tbl1 175 * in the macro below to avoid confusing clang 176 */ 177 #define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \ 178 typeof(_tbl) *__tbl2 = &(_tbl); \ 179 jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \ 180 }) 181 182 #define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \ 183 typeof(_tbl) *__tbl1 = &(_tbl); \ 184 &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\ 185 }) 186 187 /** 188 * ovpn_peer_endpoints_update - update remote or local endpoint for peer 189 * @peer: peer to update the remote endpoint for 190 * @skb: incoming packet to retrieve the source/destination address from 191 */ 192 void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb) 193 { 194 struct hlist_nulls_head *nhead; 195 struct sockaddr_storage ss; 196 struct sockaddr_in6 *sa6; 197 bool reset_cache = false; 198 struct sockaddr_in *sa; 199 struct ovpn_bind *bind; 200 const void *local_ip; 201 size_t salen = 0; 202 203 spin_lock_bh(&peer->lock); 204 bind = rcu_dereference_protected(peer->bind, 205 lockdep_is_held(&peer->lock)); 206 if (unlikely(!bind)) 207 goto unlock; 208 209 switch (skb->protocol) { 210 case htons(ETH_P_IP): 211 /* float check */ 212 if 
(unlikely(!ovpn_bind_skb_src_match(bind, skb))) { 213 /* unconditionally save local endpoint in case 214 * of float, as it may have changed as well 215 */ 216 local_ip = &ip_hdr(skb)->daddr; 217 sa = (struct sockaddr_in *)&ss; 218 sa->sin_family = AF_INET; 219 sa->sin_addr.s_addr = ip_hdr(skb)->saddr; 220 sa->sin_port = udp_hdr(skb)->source; 221 salen = sizeof(*sa); 222 reset_cache = true; 223 break; 224 } 225 226 /* if no float happened, let's double check if the local endpoint 227 * has changed 228 */ 229 if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) { 230 net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n", 231 netdev_name(peer->ovpn->dev), 232 peer->id, &bind->local.ipv4.s_addr, 233 &ip_hdr(skb)->daddr); 234 bind->local.ipv4.s_addr = ip_hdr(skb)->daddr; 235 reset_cache = true; 236 } 237 break; 238 case htons(ETH_P_IPV6): 239 /* float check */ 240 if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) { 241 /* unconditionally save local endpoint in case 242 * of float, as it may have changed as well 243 */ 244 local_ip = &ipv6_hdr(skb)->daddr; 245 sa6 = (struct sockaddr_in6 *)&ss; 246 sa6->sin6_family = AF_INET6; 247 sa6->sin6_addr = ipv6_hdr(skb)->saddr; 248 sa6->sin6_port = udp_hdr(skb)->source; 249 sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr, 250 skb->skb_iif); 251 salen = sizeof(*sa6); 252 reset_cache = true; 253 break; 254 } 255 256 /* if no float happened, let's double check if the local endpoint 257 * has changed 258 */ 259 if (unlikely(!ipv6_addr_equal(&bind->local.ipv6, 260 &ipv6_hdr(skb)->daddr))) { 261 net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n", 262 netdev_name(peer->ovpn->dev), 263 peer->id, &bind->local.ipv6, 264 &ipv6_hdr(skb)->daddr); 265 bind->local.ipv6 = ipv6_hdr(skb)->daddr; 266 reset_cache = true; 267 } 268 break; 269 default: 270 goto unlock; 271 } 272 273 if (unlikely(reset_cache)) 274 dst_cache_reset(&peer->dst_cache); 275 276 /* if the peer did not 
float, we can bail out now */ 277 if (likely(!salen)) 278 goto unlock; 279 280 if (unlikely(ovpn_peer_reset_sockaddr(peer, 281 (struct sockaddr_storage *)&ss, 282 local_ip) < 0)) 283 goto unlock; 284 285 net_dbg_ratelimited("%s: peer %d floated to %pIScp", 286 netdev_name(peer->ovpn->dev), peer->id, &ss); 287 288 spin_unlock_bh(&peer->lock); 289 290 ovpn_nl_peer_float_notify(peer, &ss); 291 292 /* rehashing is required only in MP mode as P2P has one peer 293 * only and thus there is no hashtable 294 */ 295 if (peer->ovpn->mode == OVPN_MODE_MP) { 296 spin_lock_bh(&peer->ovpn->lock); 297 spin_lock_bh(&peer->lock); 298 bind = rcu_dereference_protected(peer->bind, 299 lockdep_is_held(&peer->lock)); 300 if (unlikely(!bind)) { 301 spin_unlock_bh(&peer->lock); 302 spin_unlock_bh(&peer->ovpn->lock); 303 return; 304 } 305 306 /* This function may be invoked concurrently, therefore another 307 * float may have happened in parallel: perform rehashing 308 * using the peer->bind->remote directly as key 309 */ 310 311 switch (bind->remote.in4.sin_family) { 312 case AF_INET: 313 salen = sizeof(*sa); 314 break; 315 case AF_INET6: 316 salen = sizeof(*sa6); 317 break; 318 } 319 320 /* remove old hashing */ 321 hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); 322 /* re-add with new transport address */ 323 nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr, 324 &bind->remote, salen); 325 hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); 326 spin_unlock_bh(&peer->lock); 327 spin_unlock_bh(&peer->ovpn->lock); 328 } 329 return; 330 unlock: 331 spin_unlock_bh(&peer->lock); 332 } 333 334 /** 335 * ovpn_peer_release_rcu - RCU callback performing last peer release steps 336 * @head: RCU member of the ovpn_peer 337 */ 338 static void ovpn_peer_release_rcu(struct rcu_head *head) 339 { 340 struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu); 341 342 /* this call will immediately free the dst_cache, therefore we 343 * perform it in the RCU 
callback, when all contexts are done 344 */ 345 dst_cache_destroy(&peer->dst_cache); 346 kfree(peer); 347 } 348 349 /** 350 * ovpn_peer_release - release peer private members 351 * @peer: the peer to release 352 */ 353 void ovpn_peer_release(struct ovpn_peer *peer) 354 { 355 ovpn_crypto_state_release(&peer->crypto); 356 spin_lock_bh(&peer->lock); 357 ovpn_bind_reset(peer, NULL); 358 spin_unlock_bh(&peer->lock); 359 call_rcu(&peer->rcu, ovpn_peer_release_rcu); 360 netdev_put(peer->ovpn->dev, &peer->dev_tracker); 361 } 362 363 /** 364 * ovpn_peer_release_kref - callback for kref_put 365 * @kref: the kref object belonging to the peer 366 */ 367 void ovpn_peer_release_kref(struct kref *kref) 368 { 369 struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount); 370 371 ovpn_peer_release(peer); 372 } 373 374 /** 375 * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address 376 * @skb: the packet to extract data from 377 * @ss: the sockaddr to fill 378 * 379 * Return: sockaddr length on success or -1 otherwise 380 */ 381 static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb, 382 struct sockaddr_storage *ss) 383 { 384 struct sockaddr_in6 *sa6; 385 struct sockaddr_in *sa4; 386 387 switch (skb->protocol) { 388 case htons(ETH_P_IP): 389 sa4 = (struct sockaddr_in *)ss; 390 sa4->sin_family = AF_INET; 391 sa4->sin_addr.s_addr = ip_hdr(skb)->saddr; 392 sa4->sin_port = udp_hdr(skb)->source; 393 return sizeof(*sa4); 394 case htons(ETH_P_IPV6): 395 sa6 = (struct sockaddr_in6 *)ss; 396 sa6->sin6_family = AF_INET6; 397 sa6->sin6_addr = ipv6_hdr(skb)->saddr; 398 sa6->sin6_port = udp_hdr(skb)->source; 399 return sizeof(*sa6); 400 } 401 402 return -1; 403 } 404 405 /** 406 * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb 407 * @skb: the outgoing packet 408 * 409 * Return: the IPv4 of the nexthop 410 */ 411 static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb) 412 { 413 const struct rtable *rt = skb_rtable(skb); 414 415 if (rt && 
rt->rt_uses_gateway) 416 return rt->rt_gw4; 417 418 return ip_hdr(skb)->daddr; 419 } 420 421 /** 422 * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb 423 * @skb: the outgoing packet 424 * 425 * Return: the IPv6 of the nexthop 426 */ 427 static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb) 428 { 429 const struct rt6_info *rt = skb_rt6_info(skb); 430 431 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) 432 return ipv6_hdr(skb)->daddr; 433 434 return rt->rt6i_gateway; 435 } 436 437 /** 438 * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address 439 * @ovpn: the openvpn instance to search 440 * @addr: VPN IPv4 to use as search key 441 * 442 * Refcounter is not increased for the returned peer. 443 * 444 * Return: the peer if found or NULL otherwise 445 */ 446 static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn, 447 __be32 addr) 448 { 449 struct hlist_nulls_head *nhead; 450 struct hlist_nulls_node *ntmp; 451 struct ovpn_peer *tmp; 452 unsigned int slot; 453 454 begin: 455 slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr, 456 sizeof(addr)); 457 nhead = &ovpn->peers->by_vpn_addr4[slot]; 458 459 hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4) 460 if (addr == tmp->vpn_addrs.ipv4.s_addr) 461 return tmp; 462 463 /* item may have moved during lookup - check nulls and restart 464 * if that's the case 465 */ 466 if (get_nulls_value(ntmp) != slot) 467 goto begin; 468 469 return NULL; 470 } 471 472 /** 473 * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address 474 * @ovpn: the openvpn instance to search 475 * @addr: VPN IPv6 to use as search key 476 * 477 * Refcounter is not increased for the returned peer. 
478 * 479 * Return: the peer if found or NULL otherwise 480 */ 481 static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn, 482 struct in6_addr *addr) 483 { 484 struct hlist_nulls_head *nhead; 485 struct hlist_nulls_node *ntmp; 486 struct ovpn_peer *tmp; 487 unsigned int slot; 488 489 begin: 490 slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr, 491 sizeof(*addr)); 492 nhead = &ovpn->peers->by_vpn_addr6[slot]; 493 494 hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6) 495 if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6)) 496 return tmp; 497 498 /* item may have moved during lookup - check nulls and restart 499 * if that's the case 500 */ 501 if (get_nulls_value(ntmp) != slot) 502 goto begin; 503 504 return NULL; 505 } 506 507 /** 508 * ovpn_peer_transp_match - check if sockaddr and peer binding match 509 * @peer: the peer to get the binding from 510 * @ss: the sockaddr to match 511 * 512 * Return: true if sockaddr and binding match or false otherwise 513 */ 514 static bool ovpn_peer_transp_match(const struct ovpn_peer *peer, 515 const struct sockaddr_storage *ss) 516 { 517 struct ovpn_bind *bind = rcu_dereference(peer->bind); 518 struct sockaddr_in6 *sa6; 519 struct sockaddr_in *sa4; 520 521 if (unlikely(!bind)) 522 return false; 523 524 if (ss->ss_family != bind->remote.in4.sin_family) 525 return false; 526 527 switch (ss->ss_family) { 528 case AF_INET: 529 sa4 = (struct sockaddr_in *)ss; 530 if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr) 531 return false; 532 if (sa4->sin_port != bind->remote.in4.sin_port) 533 return false; 534 break; 535 case AF_INET6: 536 sa6 = (struct sockaddr_in6 *)ss; 537 if (!ipv6_addr_equal(&sa6->sin6_addr, 538 &bind->remote.in6.sin6_addr)) 539 return false; 540 if (sa6->sin6_port != bind->remote.in6.sin6_port) 541 return false; 542 break; 543 default: 544 return false; 545 } 546 547 return true; 548 } 549 550 /** 551 * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address 
in a P2P 552 * instance 553 * @ovpn: the openvpn instance to search 554 * @ss: the transport socket address 555 * 556 * Return: the peer if found or NULL otherwise 557 */ 558 static struct ovpn_peer * 559 ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn, 560 struct sockaddr_storage *ss) 561 { 562 struct ovpn_peer *tmp, *peer = NULL; 563 564 rcu_read_lock(); 565 tmp = rcu_dereference(ovpn->peer); 566 if (likely(tmp && ovpn_peer_transp_match(tmp, ss) && 567 ovpn_peer_hold(tmp))) 568 peer = tmp; 569 rcu_read_unlock(); 570 571 return peer; 572 } 573 574 /** 575 * ovpn_peer_get_by_transp_addr - retrieve peer by transport address 576 * @ovpn: the openvpn instance to search 577 * @skb: the skb to retrieve the source transport address from 578 * 579 * Return: a pointer to the peer if found or NULL otherwise 580 */ 581 struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn, 582 struct sk_buff *skb) 583 { 584 struct ovpn_peer *tmp, *peer = NULL; 585 struct sockaddr_storage ss = { 0 }; 586 struct hlist_nulls_head *nhead; 587 struct hlist_nulls_node *ntmp; 588 unsigned int slot; 589 ssize_t sa_len; 590 591 sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss); 592 if (unlikely(sa_len < 0)) 593 return NULL; 594 595 if (ovpn->mode == OVPN_MODE_P2P) 596 return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss); 597 598 rcu_read_lock(); 599 begin: 600 slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len); 601 nhead = &ovpn->peers->by_transp_addr[slot]; 602 603 hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, 604 hash_entry_transp_addr) { 605 if (!ovpn_peer_transp_match(tmp, &ss)) 606 continue; 607 608 if (!ovpn_peer_hold(tmp)) 609 continue; 610 611 peer = tmp; 612 break; 613 } 614 615 /* item may have moved during lookup - check nulls and restart 616 * if that's the case 617 */ 618 if (!peer && get_nulls_value(ntmp) != slot) 619 goto begin; 620 rcu_read_unlock(); 621 622 return peer; 623 } 624 625 /** 626 * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P 
instance 627 * @ovpn: the openvpn instance to search 628 * @peer_id: the ID of the peer to find 629 * 630 * Return: the peer if found or NULL otherwise 631 */ 632 static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn, 633 u32 peer_id) 634 { 635 struct ovpn_peer *tmp, *peer = NULL; 636 637 rcu_read_lock(); 638 tmp = rcu_dereference(ovpn->peer); 639 if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp))) 640 peer = tmp; 641 rcu_read_unlock(); 642 643 return peer; 644 } 645 646 /** 647 * ovpn_peer_get_by_id - retrieve peer by ID 648 * @ovpn: the openvpn instance to search 649 * @peer_id: the unique peer identifier to match 650 * 651 * Return: a pointer to the peer if found or NULL otherwise 652 */ 653 struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id) 654 { 655 struct ovpn_peer *tmp, *peer = NULL; 656 struct hlist_head *head; 657 658 if (ovpn->mode == OVPN_MODE_P2P) 659 return ovpn_peer_get_by_id_p2p(ovpn, peer_id); 660 661 head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id, 662 sizeof(peer_id)); 663 664 rcu_read_lock(); 665 hlist_for_each_entry_rcu(tmp, head, hash_entry_id) { 666 if (tmp->id != peer_id) 667 continue; 668 669 if (!ovpn_peer_hold(tmp)) 670 continue; 671 672 peer = tmp; 673 break; 674 } 675 rcu_read_unlock(); 676 677 return peer; 678 } 679 680 static void ovpn_peer_remove(struct ovpn_peer *peer, 681 enum ovpn_del_peer_reason reason, 682 struct llist_head *release_list) 683 { 684 lockdep_assert_held(&peer->ovpn->lock); 685 686 switch (peer->ovpn->mode) { 687 case OVPN_MODE_MP: 688 /* prevent double remove */ 689 if (hlist_unhashed(&peer->hash_entry_id)) 690 return; 691 692 hlist_del_init_rcu(&peer->hash_entry_id); 693 hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); 694 hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); 695 hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); 696 break; 697 case OVPN_MODE_P2P: 698 /* prevent double remove */ 699 if (peer != rcu_access_pointer(peer->ovpn->peer)) 
700 return; 701 702 RCU_INIT_POINTER(peer->ovpn->peer, NULL); 703 /* in P2P mode the carrier is switched off when the peer is 704 * deleted so that third party protocols can react accordingly 705 */ 706 netif_carrier_off(peer->ovpn->dev); 707 break; 708 } 709 710 peer->delete_reason = reason; 711 ovpn_nl_peer_del_notify(peer); 712 713 /* append to provided list for later socket release and ref drop */ 714 llist_add(&peer->release_entry, release_list); 715 } 716 717 /** 718 * ovpn_peer_get_by_dst - Lookup peer to send skb to 719 * @ovpn: the private data representing the current VPN session 720 * @skb: the skb to extract the destination address from 721 * 722 * This function takes a tunnel packet and looks up the peer to send it to 723 * after encapsulation. The skb is expected to be the in-tunnel packet, without 724 * any OpenVPN related header. 725 * 726 * Assume that the IP header is accessible in the skb data. 727 * 728 * Return: the peer if found or NULL otherwise. 729 */ 730 struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn, 731 struct sk_buff *skb) 732 { 733 struct ovpn_peer *peer = NULL; 734 struct in6_addr addr6; 735 __be32 addr4; 736 737 /* in P2P mode, no matter the destination, packets are always sent to 738 * the single peer listening on the other side 739 */ 740 if (ovpn->mode == OVPN_MODE_P2P) { 741 rcu_read_lock(); 742 peer = rcu_dereference(ovpn->peer); 743 if (unlikely(peer && !ovpn_peer_hold(peer))) 744 peer = NULL; 745 rcu_read_unlock(); 746 return peer; 747 } 748 749 rcu_read_lock(); 750 switch (skb->protocol) { 751 case htons(ETH_P_IP): 752 addr4 = ovpn_nexthop_from_skb4(skb); 753 peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4); 754 break; 755 case htons(ETH_P_IPV6): 756 addr6 = ovpn_nexthop_from_skb6(skb); 757 peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6); 758 break; 759 } 760 761 if (unlikely(peer && !ovpn_peer_hold(peer))) 762 peer = NULL; 763 rcu_read_unlock(); 764 765 return peer; 766 } 767 768 /** 769 * ovpn_nexthop_from_rt4 
- look up the IPv4 nexthop for the given destination 770 * @ovpn: the private data representing the current VPN session 771 * @dest: the destination to be looked up 772 * 773 * Looks up in the IPv4 system routing table the IP of the nexthop to be used 774 * to reach the destination passed as argument. If no nexthop can be found, the 775 * destination itself is returned as it probably has to be used as nexthop. 776 * 777 * Return: the IP of the next hop if found or dest itself otherwise 778 */ 779 static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest) 780 { 781 struct rtable *rt; 782 struct flowi4 fl = { 783 .daddr = dest 784 }; 785 786 rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL); 787 if (IS_ERR(rt)) { 788 net_dbg_ratelimited("%s: no route to host %pI4\n", 789 netdev_name(ovpn->dev), &dest); 790 /* if we end up here this packet is probably going to be 791 * thrown away later 792 */ 793 return dest; 794 } 795 796 if (!rt->rt_uses_gateway) 797 goto out; 798 799 dest = rt->rt_gw4; 800 out: 801 ip_rt_put(rt); 802 return dest; 803 } 804 805 /** 806 * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination 807 * @ovpn: the private data representing the current VPN session 808 * @dest: the destination to be looked up 809 * 810 * Looks up in the IPv6 system routing table the IP of the nexthop to be used 811 * to reach the destination passed as argument. If no nexthop can be found, the 812 * destination itself is returned as it probably has to be used as nexthop. 
813 * 814 * Return: the IP of the next hop if found or dest itself otherwise 815 */ 816 static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn, 817 struct in6_addr dest) 818 { 819 #if IS_ENABLED(CONFIG_IPV6) 820 struct dst_entry *entry; 821 struct rt6_info *rt; 822 struct flowi6 fl = { 823 .daddr = dest, 824 }; 825 826 entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl, 827 NULL); 828 if (IS_ERR(entry)) { 829 net_dbg_ratelimited("%s: no route to host %pI6c\n", 830 netdev_name(ovpn->dev), &dest); 831 /* if we end up here this packet is probably going to be 832 * thrown away later 833 */ 834 return dest; 835 } 836 837 rt = dst_rt6_info(entry); 838 839 if (!(rt->rt6i_flags & RTF_GATEWAY)) 840 goto out; 841 842 dest = rt->rt6i_gateway; 843 out: 844 dst_release((struct dst_entry *)rt); 845 #endif 846 return dest; 847 } 848 849 /** 850 * ovpn_peer_check_by_src - check that skb source is routed via peer 851 * @ovpn: the openvpn instance to search 852 * @skb: the packet to extract source address from 853 * @peer: the peer to check against the source address 854 * 855 * Return: true if the peer is matching or false otherwise 856 */ 857 bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb, 858 struct ovpn_peer *peer) 859 { 860 bool match = false; 861 struct in6_addr addr6; 862 __be32 addr4; 863 864 if (ovpn->mode == OVPN_MODE_P2P) { 865 /* in P2P mode, no matter the destination, packets are always 866 * sent to the single peer listening on the other side 867 */ 868 return peer == rcu_access_pointer(ovpn->peer); 869 } 870 871 /* This function performs a reverse path check, therefore we now 872 * lookup the nexthop we would use if we wanted to route a packet 873 * to the source IP. 
If the nexthop matches the sender we know the 874 * latter is valid and we allow the packet to come in 875 */ 876 877 switch (skb->protocol) { 878 case htons(ETH_P_IP): 879 addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr); 880 rcu_read_lock(); 881 match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4)); 882 rcu_read_unlock(); 883 break; 884 case htons(ETH_P_IPV6): 885 addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr); 886 rcu_read_lock(); 887 match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6)); 888 rcu_read_unlock(); 889 break; 890 } 891 892 return match; 893 } 894 895 void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer) 896 { 897 struct hlist_nulls_head *nhead; 898 899 lockdep_assert_held(&peer->ovpn->lock); 900 901 /* rehashing makes sense only in multipeer mode */ 902 if (peer->ovpn->mode != OVPN_MODE_MP) 903 return; 904 905 if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) { 906 /* remove potential old hashing */ 907 hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); 908 909 nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4, 910 &peer->vpn_addrs.ipv4, 911 sizeof(peer->vpn_addrs.ipv4)); 912 hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead); 913 } 914 915 if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) { 916 /* remove potential old hashing */ 917 hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); 918 919 nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6, 920 &peer->vpn_addrs.ipv6, 921 sizeof(peer->vpn_addrs.ipv6)); 922 hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead); 923 } 924 } 925 926 /** 927 * ovpn_peer_add_mp - add peer to related tables in a MP instance 928 * @ovpn: the instance to add the peer to 929 * @peer: the peer to add 930 * 931 * Return: 0 on success or a negative error code otherwise 932 */ 933 static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer) 934 { 935 struct sockaddr_storage sa = { 0 }; 936 struct hlist_nulls_head *nhead; 937 struct sockaddr_in6 *sa6; 938 struct 
sockaddr_in *sa4; 939 struct ovpn_bind *bind; 940 struct ovpn_peer *tmp; 941 size_t salen; 942 int ret = 0; 943 944 spin_lock_bh(&ovpn->lock); 945 /* do not add duplicates */ 946 tmp = ovpn_peer_get_by_id(ovpn, peer->id); 947 if (tmp) { 948 ovpn_peer_put(tmp); 949 ret = -EEXIST; 950 goto out; 951 } 952 953 bind = rcu_dereference_protected(peer->bind, true); 954 /* peers connected via TCP have bind == NULL */ 955 if (bind) { 956 switch (bind->remote.in4.sin_family) { 957 case AF_INET: 958 sa4 = (struct sockaddr_in *)&sa; 959 960 sa4->sin_family = AF_INET; 961 sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr; 962 sa4->sin_port = bind->remote.in4.sin_port; 963 salen = sizeof(*sa4); 964 break; 965 case AF_INET6: 966 sa6 = (struct sockaddr_in6 *)&sa; 967 968 sa6->sin6_family = AF_INET6; 969 sa6->sin6_addr = bind->remote.in6.sin6_addr; 970 sa6->sin6_port = bind->remote.in6.sin6_port; 971 salen = sizeof(*sa6); 972 break; 973 default: 974 ret = -EPROTONOSUPPORT; 975 goto out; 976 } 977 978 nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa, 979 salen); 980 hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); 981 } 982 983 hlist_add_head_rcu(&peer->hash_entry_id, 984 ovpn_get_hash_head(ovpn->peers->by_id, &peer->id, 985 sizeof(peer->id))); 986 987 ovpn_peer_hash_vpn_ip(peer); 988 out: 989 spin_unlock_bh(&ovpn->lock); 990 return ret; 991 } 992 993 /** 994 * ovpn_peer_add_p2p - add peer to related tables in a P2P instance 995 * @ovpn: the instance to add the peer to 996 * @peer: the peer to add 997 * 998 * Return: 0 on success or a negative error code otherwise 999 */ 1000 static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer) 1001 { 1002 LLIST_HEAD(release_list); 1003 struct ovpn_peer *tmp; 1004 1005 spin_lock_bh(&ovpn->lock); 1006 /* in p2p mode it is possible to have a single peer only, therefore the 1007 * old one is released and substituted by the new one 1008 */ 1009 tmp = rcu_dereference_protected(ovpn->peer, 1010 
lockdep_is_held(&ovpn->lock)); 1011 if (tmp) 1012 ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN, 1013 &release_list); 1014 1015 rcu_assign_pointer(ovpn->peer, peer); 1016 /* in P2P mode the carrier is switched on when the peer is added */ 1017 netif_carrier_on(ovpn->dev); 1018 unlock_ovpn(ovpn, &release_list); 1019 1020 return 0; 1021 } 1022 1023 /** 1024 * ovpn_peer_add - add peer to the related tables 1025 * @ovpn: the openvpn instance the peer belongs to 1026 * @peer: the peer object to add 1027 * 1028 * Assume refcounter was increased by caller 1029 * 1030 * Return: 0 on success or a negative error code otherwise 1031 */ 1032 int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer) 1033 { 1034 switch (ovpn->mode) { 1035 case OVPN_MODE_MP: 1036 return ovpn_peer_add_mp(ovpn, peer); 1037 case OVPN_MODE_P2P: 1038 return ovpn_peer_add_p2p(ovpn, peer); 1039 } 1040 1041 return -EOPNOTSUPP; 1042 } 1043 1044 /** 1045 * ovpn_peer_del_mp - delete peer from related tables in a MP instance 1046 * @peer: the peer to delete 1047 * @reason: reason why the peer was deleted (sent to userspace) 1048 * @release_list: list where delete peer should be appended 1049 * 1050 * Return: 0 on success or a negative error code otherwise 1051 */ 1052 static int ovpn_peer_del_mp(struct ovpn_peer *peer, 1053 enum ovpn_del_peer_reason reason, 1054 struct llist_head *release_list) 1055 { 1056 struct ovpn_peer *tmp; 1057 int ret = -ENOENT; 1058 1059 lockdep_assert_held(&peer->ovpn->lock); 1060 1061 tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id); 1062 if (tmp == peer) { 1063 ovpn_peer_remove(peer, reason, release_list); 1064 ret = 0; 1065 } 1066 1067 if (tmp) 1068 ovpn_peer_put(tmp); 1069 1070 return ret; 1071 } 1072 1073 /** 1074 * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance 1075 * @peer: the peer to delete 1076 * @reason: reason why the peer was deleted (sent to userspace) 1077 * @release_list: list where delete peer should be appended 1078 * 1079 * 
Return: 0 on success or a negative error code otherwise 1080 */ 1081 static int ovpn_peer_del_p2p(struct ovpn_peer *peer, 1082 enum ovpn_del_peer_reason reason, 1083 struct llist_head *release_list) 1084 { 1085 struct ovpn_peer *tmp; 1086 1087 lockdep_assert_held(&peer->ovpn->lock); 1088 1089 tmp = rcu_dereference_protected(peer->ovpn->peer, 1090 lockdep_is_held(&peer->ovpn->lock)); 1091 if (tmp != peer) 1092 return -ENOENT; 1093 1094 ovpn_peer_remove(peer, reason, release_list); 1095 1096 return 0; 1097 } 1098 1099 /** 1100 * ovpn_peer_del - delete peer from related tables 1101 * @peer: the peer object to delete 1102 * @reason: reason for deleting peer (will be sent to userspace) 1103 * 1104 * Return: 0 on success or a negative error code otherwise 1105 */ 1106 int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) 1107 { 1108 LLIST_HEAD(release_list); 1109 int ret = -EOPNOTSUPP; 1110 1111 spin_lock_bh(&peer->ovpn->lock); 1112 switch (peer->ovpn->mode) { 1113 case OVPN_MODE_MP: 1114 ret = ovpn_peer_del_mp(peer, reason, &release_list); 1115 break; 1116 case OVPN_MODE_P2P: 1117 ret = ovpn_peer_del_p2p(peer, reason, &release_list); 1118 break; 1119 default: 1120 break; 1121 } 1122 unlock_ovpn(peer->ovpn, &release_list); 1123 1124 return ret; 1125 } 1126 1127 /** 1128 * ovpn_peer_release_p2p - release peer upon P2P device teardown 1129 * @ovpn: the instance being torn down 1130 * @sk: if not NULL, release peer only if it's using this specific socket 1131 * @reason: the reason for releasing the peer 1132 */ 1133 static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk, 1134 enum ovpn_del_peer_reason reason) 1135 { 1136 struct ovpn_socket *ovpn_sock; 1137 LLIST_HEAD(release_list); 1138 struct ovpn_peer *peer; 1139 1140 spin_lock_bh(&ovpn->lock); 1141 peer = rcu_dereference_protected(ovpn->peer, 1142 lockdep_is_held(&ovpn->lock)); 1143 if (!peer) { 1144 spin_unlock_bh(&ovpn->lock); 1145 return; 1146 } 1147 1148 if (sk) { 1149 
ovpn_sock = rcu_access_pointer(peer->sock); 1150 if (!ovpn_sock || ovpn_sock->sk != sk) { 1151 spin_unlock_bh(&ovpn->lock); 1152 ovpn_peer_put(peer); 1153 return; 1154 } 1155 } 1156 1157 ovpn_peer_remove(peer, reason, &release_list); 1158 unlock_ovpn(ovpn, &release_list); 1159 } 1160 1161 static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk, 1162 enum ovpn_del_peer_reason reason) 1163 { 1164 struct ovpn_socket *ovpn_sock; 1165 LLIST_HEAD(release_list); 1166 struct ovpn_peer *peer; 1167 struct hlist_node *tmp; 1168 int bkt; 1169 1170 spin_lock_bh(&ovpn->lock); 1171 hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) { 1172 bool remove = true; 1173 1174 /* if a socket was passed as argument, skip all peers except 1175 * those using it 1176 */ 1177 if (sk) { 1178 rcu_read_lock(); 1179 ovpn_sock = rcu_dereference(peer->sock); 1180 remove = ovpn_sock && ovpn_sock->sk == sk; 1181 rcu_read_unlock(); 1182 } 1183 1184 if (remove) 1185 ovpn_peer_remove(peer, reason, &release_list); 1186 } 1187 unlock_ovpn(ovpn, &release_list); 1188 } 1189 1190 /** 1191 * ovpn_peers_free - free all peers in the instance 1192 * @ovpn: the instance whose peers should be released 1193 * @sk: if not NULL, only peers using this socket are removed and the socket 1194 * is released immediately 1195 * @reason: the reason for releasing all peers 1196 */ 1197 void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk, 1198 enum ovpn_del_peer_reason reason) 1199 { 1200 switch (ovpn->mode) { 1201 case OVPN_MODE_P2P: 1202 ovpn_peer_release_p2p(ovpn, sk, reason); 1203 break; 1204 case OVPN_MODE_MP: 1205 ovpn_peers_release_mp(ovpn, sk, reason); 1206 break; 1207 } 1208 } 1209 1210 static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer, 1211 time64_t now, 1212 struct llist_head *release_list) 1213 { 1214 time64_t last_recv, last_sent, next_run1, next_run2; 1215 unsigned long timeout, interval; 1216 bool expired; 1217 1218 spin_lock_bh(&peer->lock); 1219 
/* we expect both timers to be configured at the same time,
 * therefore bail out if either is not set
 */
	if (!peer->keepalive_timeout || !peer->keepalive_interval) {
		spin_unlock_bh(&peer->lock);
		return 0;
	}

	/* check for peer timeout: last_recv is paired with WRITE_ONCE
	 * writers (e.g. ovpn_peer_keepalive_set())
	 */
	expired = false;
	timeout = peer->keepalive_timeout;
	last_recv = READ_ONCE(peer->last_recv);
	if (now < last_recv + timeout) {
		/* traffic seen recently enough: push the expiry forward */
		peer->keepalive_recv_exp = last_recv + timeout;
		next_run1 = peer->keepalive_recv_exp;
	} else if (peer->keepalive_recv_exp > now) {
		/* no recent traffic, but the previously computed expiry has
		 * not been reached yet
		 */
		next_run1 = peer->keepalive_recv_exp;
	} else {
		expired = true;
	}

	if (expired) {
		/* peer is dead -> kill it and move on */
		spin_unlock_bh(&peer->lock);
		netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
			   peer->id);
		ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
				 release_list);
		return 0;
	}

	/* check for peer keepalive: last_sent is paired with WRITE_ONCE
	 * writers (e.g. ovpn_peer_keepalive_set())
	 */
	expired = false;
	interval = peer->keepalive_interval;
	last_sent = READ_ONCE(peer->last_sent);
	if (now < last_sent + interval) {
		/* something was sent recently: push the deadline forward */
		peer->keepalive_xmit_exp = last_sent + interval;
		next_run2 = peer->keepalive_xmit_exp;
	} else if (peer->keepalive_xmit_exp > now) {
		next_run2 = peer->keepalive_xmit_exp;
	} else {
		expired = true;
		next_run2 = now + interval;
	}
	spin_unlock_bh(&peer->lock);

	if (expired) {
		/* a keepalive packet is required */
		netdev_dbg(peer->ovpn->dev,
			   "sending keepalive to peer %u\n",
			   peer->id);
		/* the reference taken here is inherited by the worker
		 * (see ovpn_peer_keepalive_send); hold only when the work
		 * was not already pending
		 */
		if (schedule_work(&peer->keepalive_work))
			ovpn_peer_hold(peer);
	}

	/* report the earlier of the two deadlines */
	if (next_run1 < next_run2)
		return next_run1;

	return next_run2;
}

/* Run the keepalive logic on every peer of a MP instance and compute the
 * earliest time the worker has to run again. Returns 0 when no peer
 * requires any further check.
 */
static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
					    time64_t now,
					    struct llist_head *release_list)
{
	time64_t tmp_next_run, next_run = 0;
	struct hlist_node *tmp;
	struct ovpn_peer *peer;
	int
bkt;

	lockdep_assert_held(&ovpn->lock);

	hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
		tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
							       release_list);
		/* 0 means this peer needs no further check */
		if (!tmp_next_run)
			continue;

		/* the next worker run will be scheduled based on the shortest
		 * required interval across all peers
		 */
		if (!next_run || tmp_next_run < next_run)
			next_run = tmp_next_run;
	}

	return next_run;
}

/* Run the keepalive logic on the only peer of a P2P instance, if any.
 * Returns the next time the worker has to run again, or 0 when no further
 * check is required.
 */
static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
					     time64_t now,
					     struct llist_head *release_list)
{
	struct ovpn_peer *peer;
	time64_t next_run = 0;

	lockdep_assert_held(&ovpn->lock);

	peer = rcu_dereference_protected(ovpn->peer,
					 lockdep_is_held(&ovpn->lock));
	if (peer)
		next_run = ovpn_peer_keepalive_work_single(peer, now,
							   release_list);

	return next_run;
}

/**
 * ovpn_peer_keepalive_work - run keepalive logic on each known peer
 * @work: pointer to the work member of the related ovpn object
 *
 * Each peer has two timers (if configured):
 * 1. peer timeout: when no data is received for a certain interval,
 *    the peer is considered dead and it gets killed.
 * 2. peer keepalive: when no data is sent to a certain peer for a
 *    certain interval, a special 'keepalive' packet is explicitly sent.
 *
 * This function iterates across the whole peer collection while
 * checking the timers described above.
1337 */ 1338 void ovpn_peer_keepalive_work(struct work_struct *work) 1339 { 1340 struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv, 1341 keepalive_work.work); 1342 time64_t next_run = 0, now = ktime_get_real_seconds(); 1343 LLIST_HEAD(release_list); 1344 1345 spin_lock_bh(&ovpn->lock); 1346 switch (ovpn->mode) { 1347 case OVPN_MODE_MP: 1348 next_run = ovpn_peer_keepalive_work_mp(ovpn, now, 1349 &release_list); 1350 break; 1351 case OVPN_MODE_P2P: 1352 next_run = ovpn_peer_keepalive_work_p2p(ovpn, now, 1353 &release_list); 1354 break; 1355 } 1356 1357 /* prevent rearming if the interface is being destroyed */ 1358 if (next_run > 0) { 1359 netdev_dbg(ovpn->dev, 1360 "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n", 1361 next_run, now, next_run - now); 1362 schedule_delayed_work(&ovpn->keepalive_work, 1363 (next_run - now) * HZ); 1364 } 1365 unlock_ovpn(ovpn, &release_list); 1366 } 1367