/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/addrconf.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);

/* rds_tcp_tc_count counts only IPv4 connections.
 * rds6_tcp_tc_count counts both IPv4 and IPv6 connections.
 */
static unsigned int rds_tcp_tc_count;
#if IS_ENABLED(CONFIG_IPV6)
static unsigned int rds6_tcp_tc_count;
#endif

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF  0
        {
                .procname       = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_sndbuf_handler,
                .extra1         = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF  1
        {
                .procname       = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_rcvbuf_handler,
                .extra1         = &rds_tcp_min_rcvbuf,
        },
};
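
/* These knobs appear per-netns under /proc/sys/net/rds/tcp/ once the
 * module is loaded.  A hedged usage sketch (the values are illustrative,
 * not recommendations):
 *
 *   # pin RDS-TCP socket buffers in the current netns to 1 MiB
 *   echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_sndbuf
 *   echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_rcvbuf
 *
 * Writing either value drops and reconnects this netns's RDS-TCP
 * connections (see rds_tcp_sysctl_reset()) so that new sockets pick up
 * the sizes in rds_tcp_tune().
 */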

u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
        /* seq# of the last byte of data in tcp send buffer */
        return tcp_sk(tc->t_sock->sk)->write_seq;
}

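/* seq# of the first unacknowledged byte in the tcp send buffer */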
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
#if IS_ENABLED(CONFIG_IPV6)
        rds6_tcp_tc_count--;
#endif
        if (!tc->t_cpath->cp_conn->c_isv6)
                rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches to the new sock and
 * returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
                             struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *osock = tc->t_sock;

        if (!osock)
                goto newsock;

        /* Need to resolve a duelling SYN between peers.
         * We have an outstanding SYN to this peer, which may
         * potentially have transitioned to the RDS_CONN_UP state,
         * so we must quiesce any send threads before resetting
         * cp_transport_data. We quiesce these threads by setting
         * cp_state to something other than RDS_CONN_UP, and then
         * waiting for any existing threads in rds_send_xmit to
         * complete release_in_xmit(). (Subsequent threads entering
         * rds_send_xmit() will bail on !rds_conn_up().)
         *
         * However an incoming syn-ack at this point would end up
         * marking the conn as RDS_CONN_UP, and would again permit
         * rds_send_xmit() threads through, so ideally we would
         * synchronize on RDS_CONN_UP after lock_sock(), but cannot
         * do that: waiting on !RDS_IN_XMIT after lock_sock() may
         * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
         * would not get set. As a result, we set c_state to
         * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
         * cannot mark rds_conn_path_up() in the window before lock_sock()
         */
        atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
        wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
        /* reset receive side state for rds_tcp_data_recv() for osock */
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);
        lock_sock(osock->sk);
        if (tc->t_tinc) {
                rds_inc_put(&tc->t_tinc->ti_inc);
                tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;
        rds_tcp_restore_callbacks(osock, tc);
        release_sock(osock->sk);
        sock_release(osock);
newsock:
        rds_send_path_reset(cp);
        lock_sock(sock->sk);
        rds_tcp_set_callbacks(sock, cp);
        release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with data path
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
#if IS_ENABLED(CONFIG_IPV6)
        rds6_tcp_tc_count++;
#endif
        if (!tc->t_cpath->cp_conn->c_isv6)
                rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->t_cpath = cp;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = cp;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4
 * connections for backward compatibility.
 */
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                struct inet_sock *inet = inet_sk(tc->t_sock->sk);

                if (tc->t_cpath->cp_conn->c_isv6)
                        continue;

                tsinfo.local_addr = inet->inet_saddr;
                tsinfo.local_port = inet->inet_sport;
                tsinfo.peer_addr = inet->inet_daddr;
                tsinfo.peer_port = inet->inet_dport;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;
                tsinfo.tos = tc->t_cpath->cp_conn->c_tos;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
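
/* Userspace consumes RDS_INFO_TCP_SOCKETS through the RDS info
 * getsockopt interface (this is how the rds-info tool works); a hedged
 * sketch, with error handling elided:
 *
 *   int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *   struct rds_info_tcp_socket buf[64];
 *   socklen_t optlen = sizeof(buf);
 *   getsockopt(fd, SOL_RDS, RDS_INFO_TCP_SOCKETS, buf, &optlen);
 *
 * When the supplied buffer is too small, the lens->nr and lens->each
 * values set above let the core info code report the required length so
 * the caller can retry with a bigger buffer.
 */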

#if IS_ENABLED(CONFIG_IPV6)
/* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and
 * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped
 * address.
 */
static void rds6_tcp_tc_info(struct socket *sock, unsigned int len,
                             struct rds_info_iterator *iter,
                             struct rds_info_lengths *lens)
{
        struct rds6_info_tcp_socket tsinfo6;
        struct rds_tcp_connection *tc;
        unsigned long flags;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo6) < rds6_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                struct sock *sk = tc->t_sock->sk;
                struct inet_sock *inet = inet_sk(sk);

                tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
                tsinfo6.local_port = inet->inet_sport;
                tsinfo6.peer_addr = sk->sk_v6_daddr;
                tsinfo6.peer_port = inet->inet_dport;

                tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo6.data_rem = tc->t_tinc_data_rem;
                tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo6.last_expected_una = tc->t_last_expected_una;
                tsinfo6.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
        }

out:
        lens->nr = rds6_tcp_tc_count;
        lens->each = sizeof(tsinfo6);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
#endif

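/* Check whether @addr is a local address in namespace @net.  If @scope_id
 * is non-zero, only addresses hosted on that interface are considered.
 * Returns 0 when the address is local, -EADDRNOTAVAIL otherwise.
 */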
int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
                        __u32 scope_id)
{
        struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
        int ret;
#endif

        if (ipv6_addr_v4mapped(addr)) {
                if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
                        return 0;
                return -EADDRNOTAVAIL;
        }

        /* If the scope_id is specified, check only those addresses
         * hosted on the specified interface.
         */
        if (scope_id != 0) {
                rcu_read_lock();
                dev = dev_get_by_index_rcu(net, scope_id);
                /* scope_id is not valid... */
                if (!dev) {
                        rcu_read_unlock();
                        return -EADDRNOTAVAIL;
                }
                rcu_read_unlock();
        }
#if IS_ENABLED(CONFIG_IPV6)
        ret = ipv6_chk_addr(net, addr, dev, 0);
        if (ret)
                return 0;
#endif
        return -EADDRNOTAVAIL;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        if (!tc->t_tcp_node_detached)
                list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;
        int i, j;
        int ret = 0;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
                if (!tc) {
                        ret = -ENOMEM;
                        goto fail;
                }
                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
                tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                tc->t_tinc_data_rem = 0;

                conn->c_path[i].cp_transport_data = tc;
                tc->t_cpath = &conn->c_path[i];
                tc->t_tcp_node_detached = true;

                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }
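        /* All paths are allocated; publish them on the global list in one
         * pass under the lock, so teardown never sees a half-built conn.
         */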
        spin_lock_irq(&rds_tcp_conn_lock);
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = conn->c_path[i].cp_transport_data;
                tc->t_tcp_node_detached = false;
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
fail:
        if (ret) {
                for (j = 0; j < i; j++)
                        rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
        }
        return ret;
}

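/* Return true if any rds_conn_path of @conn already has a node on @list. */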
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc, *_tc;

        list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
                if (tc->t_cpath->cp_conn == conn)
                        return true;
        }
        return false;
}

static void rds_tcp_set_unloading(void)
{
        atomic_set(&rds_tcp_unloading, 1);
}

static bool rds_tcp_is_unloading(struct rds_connection *conn)
{
        return atomic_read(&rds_tcp_unloading) != 0;
}

static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
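        /* Move exactly one t_tcp_node per connection onto tmp_list, so that
         * rds_conn_destroy() below runs once per conn, not once per path.
         */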
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

static u8 rds_tcp_get_tos_map(u8 tos)
{
        /* all user tos mapped to default 0 for TCP transport */
        return 0;
}

struct rds_transport rds_tcp_transport = {
        .laddr_check            = rds_tcp_laddr_check,
        .xmit_path_prepare      = rds_tcp_xmit_path_prepare,
        .xmit_path_complete     = rds_tcp_xmit_path_complete,
        .xmit                   = rds_tcp_xmit,
        .recv_path              = rds_tcp_recv_path,
        .conn_alloc             = rds_tcp_conn_alloc,
        .conn_free              = rds_tcp_conn_free,
        .conn_path_connect      = rds_tcp_conn_path_connect,
        .conn_path_shutdown     = rds_tcp_conn_path_shutdown,
        .inc_copy_to_user       = rds_tcp_inc_copy_to_user,
        .inc_free               = rds_tcp_inc_free,
        .stats_info_copy        = rds_tcp_stats_info_copy,
        .exit                   = rds_tcp_exit,
        .get_tos_map            = rds_tcp_get_tos_map,
        .t_owner                = THIS_MODULE,
        .t_name                 = "tcp",
        .t_type                 = RDS_TRANS_TCP,
        .t_prefer_loopback      = 1,
        .t_mp_capable           = 1,
        .t_unloading            = rds_tcp_is_unloading,
};

static unsigned int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
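 * Returns false if the socket's netns is going away and cannot be pinned;
 * the caller must then give up on the socket.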
 */
bool rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn;

        tcp_sock_set_nodelay(sock->sk);
        lock_sock(sk);
        /* TCP timer functions might access net namespace even after
         * a process which created this net namespace terminated.
         */
        if (!sk->sk_net_refcnt) {
                if (!maybe_get_net(net)) {
                        release_sock(sk);
                        return false;
                }
                /* Update ns_tracker to current stack trace and refcounted tracker */
                __netns_tracker_free(net, &sk->ns_tracker, false);

                sk->sk_net_refcnt = 1;
                netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
                sock_inuse_add(net, 1);
        }
        rtn = net_generic(net, rds_tcp_netid);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
        return true;
}

static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd, rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl_sz(net, "net/rds/tcp", tbl,
                                                     ARRAY_SIZE(rds_tcp_sysctl_table));
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }

#if IS_ENABLED(CONFIG_IPV6)
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up IPv6 listen sock\n");

#if IS_ENABLED(CONFIG_IPV6)
                /* Try IPv4 as some systems disable IPv6 */
                rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
                if (!rtn->rds_tcp_listen_sock) {
#endif
                        unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                        rtn->rds_tcp_sysctl = NULL;
                        err = -EAFNOSUPPORT;
                        goto fail;
#if IS_ENABLED(CONFIG_IPV6)
                }
#endif
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        rtn->rds_tcp_listen_sock = NULL;
        rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
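        /* As in rds_tcp_destroy_conns(): queue one node per conn on tmp_list;
         * the extra path nodes are detached here so that rds_tcp_conn_free()
         * knows to skip the list_del().
         */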
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
                } else {
                        list_del(&tc->t_tcp_node);
                        tc->t_tcp_node_detached = true;
                }
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_kill_sock(net);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net)
                kfree(rtn->ctl_table);
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};

void *rds_tcp_listen_sock_def_readable(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        if (!lsock)
                return NULL;

        return lsock->sk->sk_user_data;
}

/* when sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far-between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                /* reconnect with new parameters */
                rds_conn_path_drop(tc->t_cpath, false);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
                                 const struct ctl_table *ctl, int write,
                                 void *buffer, size_t *lenp, loff_t *fpos)
{
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }

        if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
                struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);

                rds_tcp_sysctl_reset(net);
        }

        return 0;
}

static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos)
{
        struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
                                               sndbuf_size);

        return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos)
{
        struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
                                               rcvbuf_size);

        return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static void rds_tcp_exit(void)
{
        rds_tcp_set_unloading();
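        /* The grace period below pairs with the RCU read-side sections in
         * the data path that call rds_destroy_pending(); after it, all such
         * readers have either observed rds_tcp_unloading or finished.
         */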
        synchronize_rcu();
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
        rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
        unregister_pernet_device(&rds_tcp_net_ops);
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int __init rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_slab;

        ret = register_pernet_device(&rds_tcp_net_ops);
        if (ret)
                goto out_recv;

        rds_trans_register(&rds_tcp_transport);

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
        rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif

        goto out;
out_recv:
        rds_tcp_recv_exit();
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");