// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel offload
 *
 * Copyright (C) 2020-2025 OpenVPN, Inc.
 *
 * Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/udp.h>

#include "ovpnpriv.h"
#include "main.h"
#include "io.h"
#include "peer.h"
#include "socket.h"
#include "tcp.h"
#include "udp.h"

/**
 * ovpn_socket_release_kref - kref_put callback releasing the socket
 * @kref: the refcount embedded in the ovpn_socket being released
 */
static void ovpn_socket_release_kref(struct kref *kref)
{
	struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
						refcount);

	if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
		ovpn_udp_socket_detach(sock);
	else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
		ovpn_tcp_socket_detach(sock);
}

/**
 * ovpn_socket_put - decrease reference counter
 * @peer: peer whose socket reference counter should be decreased
 * @sock: the RCU protected peer socket
 *
 * This function is only used internally. Users wishing to release their
 * reference to the ovpn_socket should use ovpn_socket_release().
 *
 * Return: true if the socket was released, false otherwise
 */
static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock)
{
	return kref_put(&sock->refcount, ovpn_socket_release_kref);
}
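
/* Illustrative sketch only, kept out of the build: a hypothetical helper
 * (not part of this driver) showing how a holder of an ovpn_socket
 * reference is expected to drop it. The sock lock is taken around the put,
 * mirroring the invariant documented in ovpn_socket_release() below, and a
 * final put ends up in ovpn_socket_release_kref(), which detaches the
 * socket from its protocol handler.
 */
#if 0
static void example_drop_socket_ref(struct ovpn_peer *peer,
				    struct ovpn_socket *sock)
{
	lock_sock(sock->sock->sk);
	if (ovpn_socket_put(peer, sock))
		pr_debug("last reference gone, socket detached\n");
	release_sock(sock->sock->sk);
}
#endif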
/**
 * ovpn_socket_release - release resources owned by socket user
 * @peer: peer whose socket should be released
 *
 * This function should be invoked when the peer is being removed
 * and wants to drop its link to the socket.
 *
 * In case of UDP, the detach routine will drop a reference to the
 * ovpn netdev, pointed to by the ovpn_socket.
 *
 * In case of TCP, releasing the socket will drop the refcounter of the
 * peer it is linked to, thus allowing the peer to disappear as well.
 *
 * This function is expected to be invoked exactly once per peer.
 *
 * NOTE: this function may sleep
 */
void ovpn_socket_release(struct ovpn_peer *peer)
{
	struct ovpn_socket *sock;
	bool released;

	might_sleep();

	sock = rcu_replace_pointer(peer->sock, NULL, true);
	/* release may be invoked after socket was detached */
	if (!sock)
		return;

	/* sanity check: we should not end up here if the socket
	 * was already closed
	 */
	if (!sock->sock->sk) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return;
	}

	/* Drop the reference while holding the sock lock to prevent a
	 * concurrent ovpn_socket_new() call from messing with a partially
	 * detached socket.
	 *
	 * Holding the lock ensures that a socket with refcnt 0 is fully
	 * detached before it can be picked up by a concurrent reader.
	 */
	lock_sock(sock->sock->sk);
	released = ovpn_socket_put(peer, sock);
	release_sock(sock->sock->sk);

	/* align all readers with sk_user_data being NULL */
	synchronize_rcu();

	/* following cleanup should happen with lock released */
	if (released) {
		if (sock->sock->sk->sk_protocol == IPPROTO_UDP) {
			netdev_put(sock->ovpn->dev, &sock->dev_tracker);
		} else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) {
			/* wait for TCP jobs to terminate */
			ovpn_tcp_socket_wait_finish(sock);
			ovpn_peer_put(sock->peer);
		}
		/* we can call plain kfree() because we already waited one RCU
		 * period due to synchronize_rcu()
		 */
		kfree(sock);
	}
}

/**
 * ovpn_socket_hold - increase reference counter
 * @sock: the socket to take a reference to
 *
 * Return: true on success or false if the socket is already being released
 */
static bool ovpn_socket_hold(struct ovpn_socket *sock)
{
	return kref_get_unless_zero(&sock->refcount);
}

/**
 * ovpn_socket_attach - attach a kernel socket to its ovpn user
 * @sock: the new ovpn socket wrapping the kernel socket
 * @peer: the peer reachable via this socket
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
{
	if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
		return ovpn_udp_socket_attach(sock, peer->ovpn);
	else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
		return ovpn_tcp_socket_attach(sock, peer);

	return -EOPNOTSUPP;
}
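
/* Illustrative sketch only, kept out of the build: a hypothetical lookup
 * helper (not part of this driver) showing the reader side of the scheme
 * above. sk_user_data is published by ovpn_socket_new() below and cleared
 * by the detach routines, so a reader dereferences it under RCU and only
 * uses the result if ovpn_socket_hold() still succeeds.
 */
#if 0
static struct ovpn_socket *example_sk_to_ovpn_socket(struct sock *sk)
{
	struct ovpn_socket *ovpn_sock;

	rcu_read_lock();
	ovpn_sock = rcu_dereference_sk_user_data(sk);
	if (ovpn_sock && !ovpn_socket_hold(ovpn_sock))
		ovpn_sock = NULL;	/* socket is being destroyed */
	rcu_read_unlock();

	return ovpn_sock;
}
#endif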
/**
 * ovpn_socket_new - create a new socket and initialize it
 * @sock: the kernel socket to embed
 * @peer: the peer reachable via this socket
 *
 * Return: an openvpn socket on success or an ERR_PTR carrying a negative
 *	   error code otherwise
 */
struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
{
	struct ovpn_socket *ovpn_sock;
	int ret;

	lock_sock(sock->sk);

	/* a TCP socket can only be owned by a single peer, therefore there
	 * can't be any other user
	 */
	if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) {
		ovpn_sock = ERR_PTR(-EBUSY);
		goto sock_release;
	}

	/* a UDP socket can be shared across multiple peers, but we must make
	 * sure it is not owned by something else
	 */
	if (sock->sk->sk_protocol == IPPROTO_UDP) {
		u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type);

		/* socket owned by other encapsulation module */
		if (type && type != UDP_ENCAP_OVPNINUDP) {
			ovpn_sock = ERR_PTR(-EBUSY);
			goto sock_release;
		}

		rcu_read_lock();
		ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
		if (ovpn_sock) {
			/* socket owned by another ovpn instance, we can't
			 * use it
			 */
			if (ovpn_sock->ovpn != peer->ovpn) {
				ovpn_sock = ERR_PTR(-EBUSY);
				rcu_read_unlock();
				goto sock_release;
			}

			/* this socket is already owned by this instance,
			 * therefore we can increase the refcounter and
			 * use it as expected
			 */
			if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) {
				/* this should never happen because setting
				 * the refcnt to 0 and detaching the socket
				 * is expected to be atomic
				 */
				ovpn_sock = ERR_PTR(-EAGAIN);
				rcu_read_unlock();
				goto sock_release;
			}

			rcu_read_unlock();
			goto sock_release;
		}
		rcu_read_unlock();
	}

	/* socket is not owned: attach to this ovpn instance */

	ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL);
	if (!ovpn_sock) {
		ovpn_sock = ERR_PTR(-ENOMEM);
		goto sock_release;
	}

	ovpn_sock->sock = sock;
	kref_init(&ovpn_sock->refcount);

	ret = ovpn_socket_attach(ovpn_sock, peer);
	if (ret < 0) {
		kfree(ovpn_sock);
		ovpn_sock = ERR_PTR(ret);
		goto sock_release;
	}

	/* TCP sockets are per-peer, therefore they are linked to their
	 * unique peer
	 */
	if (sock->sk->sk_protocol == IPPROTO_TCP) {
		INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
		ovpn_sock->peer = peer;
		ovpn_peer_hold(peer);
	} else if (sock->sk->sk_protocol == IPPROTO_UDP) {
		/* in UDP we only link the ovpn instance since the socket is
		 * shared among multiple peers
		 */
		ovpn_sock->ovpn = peer->ovpn;
		netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker,
			    GFP_KERNEL);
	}

	rcu_assign_sk_user_data(sock->sk, ovpn_sock);
sock_release:
	release_sock(sock->sk);
	return ovpn_sock;
}
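
/* Illustrative sketch only, kept out of the build: a hypothetical caller
 * (not part of this driver) tying the two entry points together. A peer
 * obtains its socket reference through ovpn_socket_new(), which may return
 * an ERR_PTR such as -EBUSY (socket owned by someone else) or -EAGAIN
 * (race with a concurrent detach, can be retried), and drops that
 * reference exactly once via ovpn_socket_release() at teardown time.
 */
#if 0
static int example_peer_bind_socket(struct socket *sock,
				    struct ovpn_peer *peer)
{
	struct ovpn_socket *ovpn_sock = ovpn_socket_new(sock, peer);

	if (IS_ERR(ovpn_sock))
		return PTR_ERR(ovpn_sock);

	rcu_assign_pointer(peer->sock, ovpn_sock);
	return 0;
}

static void example_peer_unbind_socket(struct ovpn_peer *peer)
{
	/* drops the reference taken in example_peer_bind_socket(); may sleep */
	ovpn_socket_release(peer);
}
#endif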