1 /* 2 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

/* sk_state_change callback installed on RDS-TCP sockets.  Runs in softirq
 * context under sk->sk_callback_lock; translates TCP state transitions into
 * RDS connection-path state changes, then chains to the callback that was in
 * place before ours.
 */
void rds_tcp_state_change(struct sock *sk)
{
	void (*state_change)(struct sock *sk);
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;

	read_lock_bh(&sk->sk_callback_lock);
	cp = sk->sk_user_data;
	if (!cp) {
		/* sk_user_data already cleared (socket being torn down or
		 * never fully set up): just run the original callback.
		 */
		state_change = sk->sk_state_change;
		goto out;
	}
	tc = cp->cp_transport_data;
	state_change = tc->t_orig_state_change;

	rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);

	switch (sk->sk_state) {
	/* ignore connecting sockets as they make progress */
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	case TCP_ESTABLISHED:
		/* Force the peer to reconnect so that we have the
		 * TCP ports going from <smaller-ip>.<transient> to
		 * <larger-ip>.<RDS_TCP_PORT>. We avoid marking the
		 * RDS connection as RDS_CONN_UP until the reconnect,
		 * to avoid RDS datagram loss.
		 */
		if (rds_addr_cmp(&cp->cp_conn->c_laddr,
				 &cp->cp_conn->c_faddr) >= 0 &&
		    rds_conn_path_transition(cp, RDS_CONN_CONNECTING,
					     RDS_CONN_ERROR)) {
			rds_conn_path_drop(cp, false);
		} else {
			rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
		}
		break;
	case TCP_CLOSING:
	case TCP_TIME_WAIT:
		/* Passive close is finishing: wake a waiter in
		 * rds_tcp_conn_path_shutdown() so it can re-check the
		 * socket state.
		 */
		if (wq_has_sleeper(&tc->t_recv_done_waitq))
			wake_up(&tc->t_recv_done_waitq);
		break;
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_CLOSE:
		/* Peer closed (or socket fully closed): wake any shutdown
		 * waiter, then drop the RDS path so reconnect logic runs.
		 */
		if (wq_has_sleeper(&tc->t_recv_done_waitq))
			wake_up(&tc->t_recv_done_waitq);
		rds_conn_path_drop(cp, false);
		break;
	default:
		break;
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	/* chain to TCP's original state_change callback */
	state_change(sk);
}

/* Create, bind, and (non-blocking) connect the client-side kernel TCP socket
 * for one RDS connection path.
 *
 * Returns 0 on success or connect-in-progress, -EAGAIN when a secondary
 * multipath path must wait for the handshake probe, or a negative errno on
 * failure.  On success the socket's ownership passes to the callbacks set by
 * rds_tcp_set_callbacks(); on failure it is released here.
 */
int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
{
	struct socket *sock = NULL;
	struct sockaddr_in6 sin6;
	struct sockaddr_in sin;
	struct sockaddr *addr;
	int port_low, port_high, port;
	int port_groups, groups_left;
	int addrlen;
	bool isv6;
	int ret;
	struct rds_connection *conn = cp->cp_conn;
	struct rds_tcp_connection *tc = cp->cp_transport_data;

	/* for multipath rds, we only trigger the connection after
	 * the handshake probe has determined the number of paths.
	 */
	if (cp->cp_index > 0 && cp->cp_conn->c_npaths < 2)
		return -EAGAIN;

	mutex_lock(&tc->t_conn_path_lock);

	if (rds_conn_path_up(cp)) {
		/* raced with another connect attempt that already won */
		mutex_unlock(&tc->t_conn_path_lock);
		return 0;
	}
	/* a v4-mapped local address means this is really an IPv4 connection */
	if (ipv6_addr_v4mapped(&conn->c_laddr)) {
		ret = sock_create_kern(rds_conn_net(conn), PF_INET,
				       SOCK_STREAM, IPPROTO_TCP, &sock);
		isv6 = false;
	} else {
		ret = sock_create_kern(rds_conn_net(conn), PF_INET6,
				       SOCK_STREAM, IPPROTO_TCP, &sock);
		isv6 = true;
	}

	if (ret < 0)
		goto out;

	if (!rds_tcp_tune(sock)) {
		ret = -EINVAL;
		goto out;
	}

	/* local address to bind; the source port is filled in by the
	 * group-scan loop below.
	 */
	if (isv6) {
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = conn->c_laddr;
		sin6.sin6_port = 0;
		sin6.sin6_flowinfo = 0;
		sin6.sin6_scope_id = conn->c_dev_if;
		addr = (struct sockaddr *)&sin6;
		addrlen = sizeof(sin6);
	} else {
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
		sin.sin_port = 0;
		addr = (struct sockaddr *)&sin;
		addrlen = sizeof(sin);
	}

	/* encode cp->cp_index in lowest bits of source-port */
	inet_get_local_port_range(rds_conn_net(conn), &port_low, &port_high);
	port_low = ALIGN(port_low, RDS_MPATH_WORKERS);
	port_groups = (port_high - port_low + 1) / RDS_MPATH_WORKERS;
	ret = -EADDRINUSE;
	groups_left = port_groups;
	/* scan the port groups round-robin, remembering where we stopped in
	 * tc->t_client_port_group so successive attempts don't always retry
	 * the same source port first.
	 */
	while (groups_left-- > 0 && ret) {
		if (++tc->t_client_port_group >= port_groups)
			tc->t_client_port_group = 0;
		port = port_low +
		       tc->t_client_port_group * RDS_MPATH_WORKERS +
		       cp->cp_index;

		if (isv6)
			sin6.sin6_port = htons(port);
		else
			sin.sin_port = htons(port);
		ret = kernel_bind(sock, (struct sockaddr_unsized *)addr,
				  addrlen);
	}
	if (ret) {
		rdsdebug("bind failed with %d at address %pI6c\n",
			 ret, &conn->c_laddr);
		goto out;
	}

	/* destination: peer address at the well-known RDS_TCP_PORT */
	if (isv6) {
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = conn->c_faddr;
		sin6.sin6_port = htons(RDS_TCP_PORT);
		sin6.sin6_flowinfo = 0;
		sin6.sin6_scope_id = conn->c_dev_if;
		addr = (struct sockaddr *)&sin6;
		addrlen = sizeof(sin6);
	} else {
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
		sin.sin_port = htons(RDS_TCP_PORT);
		addr = (struct sockaddr *)&sin;
		addrlen = sizeof(sin);
	}

	/*
	 * once we call connect() we can start getting callbacks and they
	 * own the socket
	 */
	rds_tcp_set_callbacks(sock, cp);
	ret = kernel_connect(sock, (struct sockaddr_unsized *)addr, addrlen, O_NONBLOCK);

	rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
	/* non-blocking connect: -EINPROGRESS is the normal success path */
	if (ret == -EINPROGRESS)
		ret = 0;
	if (ret == 0) {
		rds_tcp_keepalive(sock);
		/* ownership transferred to the callbacks; don't release */
		sock = NULL;
	} else {
		rds_tcp_restore_callbacks(sock, cp->cp_transport_data);
	}

out:
	mutex_unlock(&tc->t_conn_path_lock);
	if (sock)
		sock_release(sock);
	return ret;
}

/*
 * Before killing the tcp socket this needs to serialize with callbacks. The
 * caller has already grabbed the sending sem so we're serialized with other
 * senders.
 *
 * TCP calls the callbacks with the sock lock so we hold it while we reset the
 * callbacks to those set by TCP. Our callbacks won't execute again once we
 * hold the sock lock.
 */
void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	struct sock *sk;
	unsigned int rounds;

	rdsdebug("shutting down conn %p tc %p sock %p\n",
		 cp->cp_conn, tc, sock);

	if (sock) {
		sk = sock->sk;
		/* if the netns/connection is being destroyed, don't linger
		 * in TIME_WAIT holding resources.
		 */
		if (rds_destroy_pending(cp->cp_conn))
			sock_no_linger(sk);

		/* half-close: send FIN but keep receiving */
		sock->ops->shutdown(sock, SHUT_WR);

		/* after sending FIN,
		 * wait until we processed all incoming messages
		 * and we're sure that there won't be any more:
		 * i.e. state CLOSING, TIME_WAIT, CLOSE_WAIT,
		 * LAST_ACK, or CLOSE (RFC 793).
		 *
		 * Give up waiting after 5 seconds and allow messages
		 * to theoretically get dropped, if the TCP transition
		 * didn't happen.
		 */
		rounds = 0;
		do {
			/* we need to ensure messages are dequeued here
			 * since "rds_recv_worker" only dispatches messages
			 * while the connection is still in RDS_CONN_UP
			 * and there is no guarantee that "rds_tcp_data_ready"
			 * was called nor that "sk_data_ready" still points to
			 * it.
			 */
			rds_tcp_recv_path(cp);
		} while (!wait_event_timeout(tc->t_recv_done_waitq,
					     (sk->sk_state == TCP_CLOSING ||
					      sk->sk_state == TCP_TIME_WAIT ||
					      sk->sk_state == TCP_CLOSE_WAIT ||
					      sk->sk_state == TCP_LAST_ACK ||
					      sk->sk_state == TCP_CLOSE) &&
					     skb_queue_empty_lockless(&sk->sk_receive_queue),
					     msecs_to_jiffies(100)) &&
			 ++rounds < 50); /* 50 x 100ms == the 5s cap above */
		/* holding the sock lock keeps TCP from invoking our
		 * callbacks while we unhook them below.
		 */
		lock_sock(sk);

		/* discard messages that the peer received already */
		tc->t_last_seen_una = rds_tcp_snd_una(tc);
		rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc),
					 rds_tcp_is_acked);

		rds_tcp_restore_callbacks(sock, tc); /* tc->tc_sock = NULL */

		release_sock(sk);
		sock_release(sock);
	}

	/* drop any partially reassembled inbound message and reset the
	 * header/data remainder counters for the next connection attempt.
	 */
	if (tc->t_tinc) {
		rds_inc_put(&tc->t_tinc->ti_inc);
		tc->t_tinc = NULL;
	}
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;
}