/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/addrconf.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);

/* rds_tcp_tc_count counts only IPv4 connections.
 * rds6_tcp_tc_count counts both IPv4 and IPv6 connections.
 */
static unsigned int rds_tcp_tc_count;
#if IS_ENABLED(CONFIG_IPV6)
static unsigned int rds6_tcp_tc_count;
#endif

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF	0
	{
		.procname	= "rds_tcp_sndbuf",
		/* data is per-net pointer */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= rds_tcp_sndbuf_handler,
		.extra1		= &rds_tcp_min_sndbuf,
	},
#define RDS_TCP_RCVBUF	1
	{
		.procname	= "rds_tcp_rcvbuf",
		/* data is per-net pointer */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= rds_tcp_rcvbuf_handler,
		.extra1		= &rds_tcp_min_rcvbuf,
	},
};
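
/*
 * A sketch of how these knobs surface to the administrator: once the
 * table is registered (see register_net_sysctl_sz() below with the
 * "net/rds/tcp" path), each netns gets files under
 * /proc/sys/net/rds/tcp/.  With illustrative values,
 *
 *   echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_sndbuf
 *   echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_rcvbuf
 *
 * pins both buffers to 1 MiB instead of letting TCP autotune them.  A
 * successful write also triggers rds_tcp_sysctl_reset(), dropping the
 * netns's connections so they reconnect with the new sizes.
 */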

u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
	/* seq# of the last byte of data in tcp send buffer */
	return tcp_sk(tc->t_sock->sk)->write_seq;
}

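/* seq# of the first byte of data that the peer has not yet acked */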
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
#if IS_ENABLED(CONFIG_IPV6)
	rds6_tcp_tc_count--;
#endif
	if (!tc->t_cpath->cp_conn->c_isv6)
		rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches to the new sock and
 * returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of the RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
			     struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *osock = tc->t_sock;

	if (!osock)
		goto newsock;

	/* Need to resolve a duelling SYN between peers.
	 * We have an outstanding SYN to this peer, which may
	 * potentially have transitioned to the RDS_CONN_UP state,
	 * so we must quiesce any send threads before resetting
	 * cp_transport_data. We quiesce these threads by setting
	 * cp_state to something other than RDS_CONN_UP, and then
	 * waiting for any existing threads in rds_send_xmit to
	 * complete release_in_xmit(). (Subsequent threads entering
	 * rds_send_xmit() will bail on !rds_conn_up().)
	 *
	 * However an incoming SYN-ACK at this point would end up
	 * marking the conn as RDS_CONN_UP, and would again permit
	 * rds_send_xmit() threads through, so ideally we would
	 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
	 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
	 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
	 * would not get set. As a result, we set c_state to
	 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change()
	 * cannot call rds_conn_path_up() in the window before lock_sock().
	 */
	atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
	/* reset receive side state for rds_tcp_data_recv() for osock */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);
	lock_sock(osock->sk);
	if (tc->t_tinc) {
		rds_inc_put(&tc->t_tinc->ti_inc);
		tc->t_tinc = NULL;
	}
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;
	rds_tcp_restore_callbacks(osock, tc);
	release_sock(osock->sk);
	sock_release(osock);
newsock:
	rds_send_path_reset(cp);
	lock_sock(sock->sk);
	rds_tcp_set_callbacks(sock, cp);
	release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock.  See comments
 * above rds_tcp_reset_callbacks() for notes about synchronization
 * with the data path.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
#if IS_ENABLED(CONFIG_IPV6)
	rds6_tcp_tc_count++;
#endif
	if (!tc->t_cpath->cp_conn->c_isv6)
		rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	if (!tc->t_rtn)
		tc->t_rtn = net_generic(sock_net(sock->sk), rds_tcp_netid);
	tc->t_cpath = cp;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = cp;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

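/* These records are exported to userspace through the rds-info
 * mechanism (getsockopt() on an RDS socket); rds_info_copy() streams
 * one struct per connection into the caller's buffer.
 */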
/* Handle the RDS_INFO_TCP_SOCKETS socket option.  It only returns IPv4
 * connections, for backward compatibility.
 */
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
		struct inet_sock *inet = inet_sk(tc->t_sock->sk);

		if (tc->t_cpath->cp_conn->c_isv6)
			continue;

		tsinfo.local_addr = inet->inet_saddr;
		tsinfo.local_port = inet->inet_sport;
		tsinfo.peer_addr = inet->inet_daddr;
		tsinfo.peer_port = inet->inet_dport;

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;
		tsinfo.tos = tc->t_cpath->cp_conn->c_tos;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle the RDS6_INFO_TCP_SOCKETS socket option.  It returns both IPv4
 * and IPv6 connections; IPv4 addresses are reported as IPv4-mapped IPv6
 * addresses.
 */
static void rds6_tcp_tc_info(struct socket *sock, unsigned int len,
			     struct rds_info_iterator *iter,
			     struct rds_info_lengths *lens)
{
	struct rds6_info_tcp_socket tsinfo6;
	struct rds_tcp_connection *tc;
	unsigned long flags;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo6) < rds6_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
		struct sock *sk = tc->t_sock->sk;
		struct inet_sock *inet = inet_sk(sk);

		tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
		tsinfo6.local_port = inet->inet_sport;
		tsinfo6.peer_addr = sk->sk_v6_daddr;
		tsinfo6.peer_port = inet->inet_dport;

		tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo6.data_rem = tc->t_tinc_data_rem;
		tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo6.last_expected_una = tc->t_last_expected_una;
		tsinfo6.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
	}

out:
	lens->nr = rds6_tcp_tc_count;
	lens->each = sizeof(tsinfo6);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
#endif

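/* Check whether @addr (optionally scoped by @scope_id) is a local
 * address in @net.  Returns 0 if it is usable as an RDS-TCP source
 * address, -EADDRNOTAVAIL otherwise.
 */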
int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
			__u32 scope_id)
{
	struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	int ret;
#endif

	if (ipv6_addr_v4mapped(addr)) {
		if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
			return 0;
		return -EADDRNOTAVAIL;
	}

	/* If the scope_id is specified, check only those addresses
	 * hosted on the specified interface.
	 */
	if (scope_id != 0) {
		rcu_read_lock();
		dev = dev_get_by_index_rcu(net, scope_id);
		/* scope_id is not valid... */
		if (!dev) {
			rcu_read_unlock();
			return -EADDRNOTAVAIL;
		}
		rcu_read_unlock();
	}
#if IS_ENABLED(CONFIG_IPV6)
	ret = ipv6_chk_addr(net, addr, dev, 0);
	if (ret)
		return 0;
#endif
	return -EADDRNOTAVAIL;
}

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	unsigned long flags;

	rdsdebug("freeing tc %p\n", tc);

	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
	if (!tc->t_tcp_node_detached)
		list_del(&tc->t_tcp_node);
	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

	kmem_cache_free(rds_tcp_conn_slab, tc);
}

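/* Allocate one rds_tcp_connection per connection path up front, and only
 * link the entries onto rds_tcp_conn_list once all RDS_MPATH_WORKERS
 * allocations have succeeded; a mid-loop failure then only has to free
 * the still-detached entries already allocated for this conn.
 */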
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;
	int i, j;
	int ret = 0;

	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		tc = kmem_cache_zalloc(rds_tcp_conn_slab, gfp);
		if (!tc) {
			ret = -ENOMEM;
			goto fail;
		}
		mutex_init(&tc->t_conn_path_lock);
		tc->t_sock = NULL;
		tc->t_rtn = NULL;
		tc->t_tinc = NULL;
		tc->t_tinc_hdr_rem = sizeof(struct rds_header);
		tc->t_tinc_data_rem = 0;
		init_waitqueue_head(&tc->t_recv_done_waitq);

		conn->c_path[i].cp_transport_data = tc;
		tc->t_cpath = &conn->c_path[i];
		tc->t_tcp_node_detached = true;

		rdsdebug("rds_conn_path [%d] tc %p\n", i,
			 conn->c_path[i].cp_transport_data);
	}
	spin_lock_irq(&rds_tcp_conn_lock);
	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		tc = conn->c_path[i].cp_transport_data;
		tc->t_tcp_node_detached = false;
		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
fail:
	if (ret) {
		for (j = 0; j < i; j++)
			rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
	}
	return ret;
}

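/* Return true if any rds_tcp_connection on @list belongs to @conn. */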
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc, *_tc;

	list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
		if (tc->t_cpath->cp_conn == conn)
			return true;
	}
	return false;
}

static void rds_tcp_set_unloading(void)
{
	atomic_set(&rds_tcp_unloading, 1);
}

static bool rds_tcp_is_unloading(struct rds_connection *conn)
{
	return atomic_read(&rds_tcp_unloading) != 0;
}

static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
			list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

static u8 rds_tcp_get_tos_map(u8 tos)
{
	/* all user tos mapped to default 0 for TCP transport */
	return 0;
}

struct rds_transport rds_tcp_transport = {
	.laddr_check		= rds_tcp_laddr_check,
	.xmit_path_prepare	= rds_tcp_xmit_path_prepare,
	.xmit_path_complete	= rds_tcp_xmit_path_complete,
	.xmit			= rds_tcp_xmit,
	.recv_path		= rds_tcp_recv_path,
	.conn_alloc		= rds_tcp_conn_alloc,
	.conn_free		= rds_tcp_conn_free,
	.conn_slots_available	= rds_tcp_conn_slots_available,
	.conn_path_connect	= rds_tcp_conn_path_connect,
	.conn_path_shutdown	= rds_tcp_conn_path_shutdown,
	.inc_copy_to_user	= rds_tcp_inc_copy_to_user,
	.inc_free		= rds_tcp_inc_free,
	.stats_info_copy	= rds_tcp_stats_info_copy,
	.exit			= rds_tcp_exit,
	.get_tos_map		= rds_tcp_get_tos_map,
	.t_owner		= THIS_MODULE,
	.t_name			= "tcp",
	.t_type			= RDS_TRANS_TCP,
	.t_prefer_loopback	= 1,
	.t_mp_capable		= 1,
	.t_unloading		= rds_tcp_is_unloading,
};

int rds_tcp_netid;

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
bool rds_tcp_tune(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn;

	tcp_sock_set_nodelay(sock->sk);
	/* TCP timer functions might access net namespace even after
	 * a process which created this net namespace terminated.
	 */
	if (!sk->sk_net_refcnt) {
		if (!maybe_get_net(net))
			return false;
		/*
		 * sk_net_refcnt_upgrade() must be called before lock_sock()
		 * because it does a GFP_KERNEL allocation, which can trigger
		 * fs_reclaim and create a circular lock dependency with the
		 * socket lock.  The fields it modifies (sk_net_refcnt,
		 * ns_tracker) are not accessed by any concurrent code path
		 * at this point.
		 */
		sk_net_refcnt_upgrade(sk);
		put_net(net);
	}
	lock_sock(sk);
	rtn = net_generic(net, rds_tcp_netid);
	if (rtn->sndbuf_size > 0) {
		sk->sk_sndbuf = rtn->sndbuf_size;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rtn->rcvbuf_size > 0) {
		sk->sk_rcvbuf = rtn->rcvbuf_size;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
	release_sock(sk);
	return true;
}

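/* Drain the listen backlog from workqueue context, accepting sockets for
 * as long as rds_tcp_accept_one() keeps succeeding.
 */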
static void rds_tcp_accept_worker(struct work_struct *work)
{
	struct rds_tcp_net *rtn = container_of(work,
					       struct rds_tcp_net,
					       rds_tcp_accept_w);

	while (rds_tcp_accept_one(rtn) == 0)
		cond_resched();
}

void rds_tcp_accept_work(struct rds_tcp_net *rtn)
{
	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct ctl_table *tbl;
	int err = 0;

	memset(rtn, 0, sizeof(*rtn));

	mutex_init(&rtn->rds_tcp_accept_lock);

	/* {snd, rcv}buf_size default to 0, which implies we let the
	 * stack pick the value, and permit auto-tuning of buffer size.
	 */
	if (net == &init_net) {
		tbl = rds_tcp_sysctl_table;
	} else {
		tbl = kmemdup(rds_tcp_sysctl_table,
			      sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
		if (!tbl) {
			pr_warn("could not allocate sysctl table\n");
			return -ENOMEM;
		}
		rtn->ctl_table = tbl;
	}
	tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
	tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
	rtn->rds_tcp_sysctl = register_net_sysctl_sz(net, "net/rds/tcp", tbl,
						     ARRAY_SIZE(rds_tcp_sysctl_table));
	if (!rtn->rds_tcp_sysctl) {
		pr_warn("could not register sysctl\n");
		err = -ENOMEM;
		goto fail;
	}

#if IS_ENABLED(CONFIG_IPV6)
	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
	if (!rtn->rds_tcp_listen_sock) {
		pr_warn("could not set up IPv6 listen sock\n");

#if IS_ENABLED(CONFIG_IPV6)
		/* Try IPv4 as some systems disable IPv6 */
		rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
		if (!rtn->rds_tcp_listen_sock) {
#endif
			unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
			rtn->rds_tcp_sysctl = NULL;
			err = -EAFNOSUPPORT;
			goto fail;
#if IS_ENABLED(CONFIG_IPV6)
		}
#endif
	}
	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
	return 0;

fail:
	if (net != &init_net)
		kfree(tbl);
	return err;
}

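/* Tear down the RDS-TCP state of a dying netns: stop the listen socket
 * (releasing any half-accepted socket), then destroy each of the netns's
 * connections exactly once, using tmp_list to collapse the per-path
 * entries down to one node per connection.
 */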
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	rtn->rds_tcp_listen_sock = NULL;
	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	if (rtn->rds_tcp_accepted_sock)
		sock_release(rtn->rds_tcp_accepted_sock);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

		if (net != c_net)
			continue;
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
			list_move_tail(&tc->t_tcp_node, &tmp_list);
		} else {
			list_del(&tc->t_tcp_node);
			tc->t_tcp_node_detached = true;
		}
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rds_tcp_kill_sock(net);

	if (rtn->rds_tcp_sysctl)
		unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

	if (net != &init_net)
		kfree(rtn->ctl_table);
}

static struct pernet_operations rds_tcp_net_ops = {
	.init = rds_tcp_init_net,
	.exit = rds_tcp_exit_net,
	.id = &rds_tcp_netid,
	.size = sizeof(struct rds_tcp_net),
};

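/* Accepted sockets use this to recover the listen socket's original
 * sk_data_ready, which is stashed in its sk_user_data (cf. the undo in
 * rds_tcp_set_callbacks()).
 */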
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	if (!lsock)
		return NULL;

	return lsock->sk->sk_user_data;
}

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters.  The assumption is that such reset
 * events are few and far between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

		if (net != c_net || !tc->t_sock)
			continue;

		/* reconnect with new parameters */
		rds_conn_path_drop(tc->t_cpath, false);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
}

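/* Common handler for the sndbuf/rcvbuf sysctls: validate the write
 * against the floor in ->extra1, then drop this netns's connections via
 * rds_tcp_sysctl_reset() so they reconnect with the new size.
 */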
static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
				 const struct ctl_table *ctl, int write,
				 void *buffer, size_t *lenp, loff_t *fpos)
{
	int err;

	err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
	if (err < 0) {
		pr_warn("Invalid input. Must be >= %d\n",
			*(int *)(ctl->extra1));
		return err;
	}

	if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
		struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);

		rds_tcp_sysctl_reset(net);
	}

	return 0;
}

static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *fpos)
{
	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
					       sndbuf_size);

	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *fpos)
{
	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
					       rcvbuf_size);

	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static void rds_tcp_exit(void)
{
	rds_tcp_set_unloading();
	synchronize_rcu();
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
	unregister_pernet_device(&rds_tcp_net_ops);
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int __init rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0);
	if (!rds_tcp_conn_slab) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_slab;

	ret = register_pernet_device(&rds_tcp_net_ops);
	if (ret)
		goto out_recv;

	rds_trans_register(&rds_tcp_transport);

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif

	goto out;
out_recv:
	rds_tcp_recv_exit();
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");