/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "rds.h"
#include "tcp.h"

void rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */

	sock_set_keepalive(sock->sk);
	tcp_sock_set_keepcnt(sock->sk, keepcnt);
	tcp_sock_set_keepidle(sock->sk, keepidle);
	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	tcp_sock_set_keepintvl(sock->sk, keepidle);
}
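/* With the values above, a dead peer is declared after roughly
 * keepidle + keepcnt * keepintvl = 5 + 5 * 5 = 30 seconds of silence.
 * For illustration only, the equivalent tuning on a userspace TCP
 * socket would look roughly like the sketch below ('fd' is a
 * hypothetical connected socket, not part of this file):
 *
 *	int on = 1, idle = 5, intvl = 5, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */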
/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
 * client's ipaddr < server's ipaddr. Otherwise, close the accepted
 * socket and force a reconnect from the smaller -> larger ip addr. The reason
 * we special case cp_index 0 is to allow the rds probe ping from a node to
 * itself to get through efficiently.
 * Since reconnects are only initiated from the node with the numerically
 * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
 * by moving them to CONNECTING in this function.
 */
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
	int i;
	int npaths = max_t(int, 1, conn->c_npaths);

	/* for mprds, all paths MUST be initiated by the peer
	 * with the smaller address.
	 */
	if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
		/* Make sure we initiate at least one path if this
		 * has not already been done; rds_start_mprds() will
		 * take care of additional paths, if necessary.
		 */
		if (npaths == 1)
			rds_conn_path_connect_if_down(&conn->c_path[0]);
		return NULL;
	}

	for (i = 0; i < npaths; i++) {
		struct rds_conn_path *cp = &conn->c_path[i];

		if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
					     RDS_CONN_CONNECTING) ||
		    rds_conn_path_transition(cp, RDS_CONN_ERROR,
					     RDS_CONN_CONNECTING)) {
			return cp->cp_transport_data;
		}
	}
	return NULL;
}
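/* A worked example of the ordering rule above (addresses illustrative
 * only): if the local address is 10.0.0.2 and the peer is 10.0.0.1,
 * then rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) < 0, so the peer
 * holds the numerically smaller address, is the one allowed to
 * initiate mprds paths, and the loop above hands back a path moved to
 * CONNECTING. With the roles reversed, the function returns NULL and
 * rds_tcp_accept_one() below resets the accepted socket, forcing the
 * smaller-addressed node to reconnect.
 */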
int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp = NULL;
	int conn_state;
	struct rds_conn_path *cp;
	struct in6_addr *my_addr, *peer_addr;
#if !IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr, daddr;
#endif
	int dev_if = 0;

	if (!sock) /* module unload or netns delete in progress */
		return -ENETUNREACH;

	ret = kernel_accept(sock, &new_sock, O_NONBLOCK);
	if (ret)
		return ret;

	rds_tcp_keepalive(new_sock);
	if (!rds_tcp_tune(new_sock)) {
		ret = -EINVAL;
		goto out;
	}

	inet = inet_sk(new_sock->sk);

#if IS_ENABLED(CONFIG_IPV6)
	my_addr = &new_sock->sk->sk_v6_rcv_saddr;
	peer_addr = &new_sock->sk->sk_v6_daddr;
#else
	ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
	ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
	my_addr = &saddr;
	peer_addr = &daddr;
#endif
	rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
		 sock->sk->sk_family,
		 my_addr, ntohs(inet->inet_sport),
		 peer_addr, ntohs(inet->inet_dport));

#if IS_ENABLED(CONFIG_IPV6)
	/* sk_bound_dev_if is not set if the peer address is not a link-local
	 * address.  In that case mcast_oif happens to be set, so just use it.
	 */
	if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
		struct ipv6_pinfo *inet6;

		inet6 = inet6_sk(new_sock->sk);
		dev_if = READ_ONCE(inet6->mcast_oif);
	} else {
		dev_if = new_sock->sk->sk_bound_dev_if;
	}
#endif

	if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
		/* local address connection is only allowed via loopback */
		ret = -EOPNOTSUPP;
		goto out;
	}

	conn = rds_conn_create(sock_net(sock->sk),
			       my_addr, peer_addr,
			       &rds_tcp_transport, 0, GFP_KERNEL, dev_if);

	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup.
	 */
	rs_tcp = rds_tcp_accept_one_path(conn);
	if (!rs_tcp)
		goto rst_nsk;
	mutex_lock(&rs_tcp->t_conn_path_lock);
	cp = rs_tcp->t_cpath;
	conn_state = rds_conn_path_state(cp);
	WARN_ON(conn_state == RDS_CONN_UP);
	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
		goto rst_nsk;
	if (rs_tcp->t_sock) {
		/* Duelling SYN has been handled in rds_tcp_accept_one() */
		rds_tcp_reset_callbacks(new_sock, cp);
		/* rds_connect_path_complete() marks RDS_CONN_UP */
		rds_connect_path_complete(cp, RDS_CONN_RESETTING);
	} else {
		rds_tcp_set_callbacks(new_sock, cp);
		rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
	}
	new_sock = NULL;
	ret = 0;
	if (conn->c_npaths == 0)
		rds_send_ping(cp->cp_conn, cp->cp_index);

	goto out;
rst_nsk:
	/* reset the newly returned accept sock and bail.
	 * It is safe to set linger on new_sock because the RDS connection
	 * has not been brought up on new_sock, so no RDS-level data could
	 * be pending on it. By setting linger, we achieve the side-effect
	 * of avoiding TIME_WAIT state on new_sock.
	 */
	sock_no_linger(new_sock->sk);
	kernel_sock_shutdown(new_sock, SHUT_RDWR);
	ret = 0;
out:
	if (rs_tcp)
		mutex_unlock(&rs_tcp->t_conn_path_lock);
	if (new_sock)
		sock_release(new_sock);
	return ret;
}

void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	trace_sk_data_ready(sk);
	rdsdebug("listen data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready; we only want to queue listen work for our listening
	 * socket.
	 *
	 * (*ready)() may be null if we are racing with netns delete, and
	 * the listen socket is being torn down.
	 */
	if (sk->sk_state == TCP_LISTEN)
		rds_tcp_accept_work(sk);
	else
		ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (ready)
		ready(sk);
}

struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
{
	struct socket *sock = NULL;
	struct sockaddr_storage ss;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int addr_len;
	int ret;

	ret = sock_create_kern(net, isv6 ? PF_INET6 : PF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret < 0) {
		rdsdebug("could not create %s listener socket: %d\n",
			 isv6 ? "IPv6" : "IPv4", ret);
		goto out;
	}

	sock->sk->sk_reuse = SK_CAN_REUSE;
	tcp_sock_set_nodelay(sock->sk);

	/* Stash the original data_ready in sk_user_data so that
	 * rds_tcp_listen_stop() can restore it on teardown.
	 */
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	if (isv6) {
		sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_addr = in6addr_any;
		sin6->sin6_port = htons(RDS_TCP_PORT);
		sin6->sin6_scope_id = 0;
		sin6->sin6_flowinfo = 0;
		addr_len = sizeof(*sin6);
	} else {
		sin = (struct sockaddr_in *)&ss;
		sin->sin_family = PF_INET;
		sin->sin_addr.s_addr = htonl(INADDR_ANY);
		sin->sin_port = htons(RDS_TCP_PORT);
		addr_len = sizeof(*sin);
	}

	ret = kernel_bind(sock, (struct sockaddr_unsized *)&ss, addr_len);
	if (ret < 0) {
		rdsdebug("could not bind %s listener socket: %d\n",
			 isv6 ? "IPv6" : "IPv4", ret);
		goto out;
	}

	ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		goto out;

	return sock;
out:
	if (sock)
		sock_release(sock);
	return NULL;
}

void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
	struct sock *sk;

	if (!sock)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	flush_work(acceptor);
	sock_release(sock);
}