xref: /linux/net/rds/tcp.c (revision 140eb5227767c6754742020a16d2691222b9c19b)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define	RDS_TCP_SNDBUF	0
	{
		.procname       = "rds_tcp_sndbuf",
		/* data is per-net pointer */
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = rds_tcp_skbuf_handler,
		.extra1		= &rds_tcp_min_sndbuf,
	},
#define	RDS_TCP_RCVBUF	1
	{
		.procname       = "rds_tcp_rcvbuf",
		/* data is per-net pointer */
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = rds_tcp_skbuf_handler,
		.extra1		= &rds_tcp_min_rcvbuf,
	},
	{ }
};
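
/* The two entries above surface as /proc/sys/net/rds/tcp/rds_tcp_sndbuf and
 * .../rds_tcp_rcvbuf once register_net_sysctl() runs in rds_tcp_init_net().
 * A sketch of how an administrator might drive them (the 1MB value below is
 * purely illustrative, not a recommendation):
 *
 *	# echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_sndbuf
 *
 * Writes are checked against rds_tcp_min_{snd,rcv}buf via ->extra1, and a
 * successful write makes rds_tcp_skbuf_handler() reset this netns's
 * connections so they reconnect with the new buffer sizes.
 */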

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
	int val = 1;

	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val,
			  sizeof(val));
}

u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
	/* seq# of the last byte of data in tcp send buffer */
	return tcp_sk(tc->t_sock->sk)->write_seq;
}

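/* seq# of the oldest byte the peer has not yet acked */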
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
	rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches to the new sock and
 * returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
			     struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *osock = tc->t_sock;

	if (!osock)
		goto newsock;

	/* Need to resolve a duelling SYN between peers.
	 * We have an outstanding SYN to this peer, which may
	 * potentially have transitioned to the RDS_CONN_UP state,
	 * so we must quiesce any send threads before resetting
	 * cp_transport_data. We quiesce these threads by setting
	 * cp_state to something other than RDS_CONN_UP, and then
	 * waiting for any existing threads in rds_send_xmit to
	 * complete release_in_xmit(). (Subsequent threads entering
	 * rds_send_xmit() will bail on !rds_conn_up().)
	 *
	 * However an incoming syn-ack at this point would end up
	 * marking the conn as RDS_CONN_UP, and would again permit
	 * rds_send_xmit() threads through, so ideally we would
	 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
	 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
	 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
	 * would not get set. As a result, we set c_state to
	 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
	 * cannot mark rds_conn_path_up() in the window before lock_sock()
	 */
	atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
	lock_sock(osock->sk);
	/* reset receive side state for rds_tcp_data_recv() for osock */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);
	if (tc->t_tinc) {
		rds_inc_put(&tc->t_tinc->ti_inc);
		tc->t_tinc = NULL;
	}
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;
	rds_tcp_restore_callbacks(osock, tc);
	release_sock(osock->sk);
	sock_release(osock);
newsock:
	rds_send_path_reset(cp);
	lock_sock(sock->sk);
	rds_tcp_set_callbacks(sock, cp);
	release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with the data path.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
	rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	tc->t_cpath = cp;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = cp;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;
	struct sockaddr_in sin;
	int sinlen;
	struct socket *sock;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {

		sock = tc->t_sock;
		if (sock) {
			sock->ops->getname(sock, (struct sockaddr *)&sin,
					   &sinlen, 0);
			tsinfo.local_addr = sin.sin_addr.s_addr;
			tsinfo.local_port = sin.sin_port;
			sock->ops->getname(sock, (struct sockaddr *)&sin,
					   &sinlen, 1);
			tsinfo.peer_addr = sin.sin_addr.s_addr;
			tsinfo.peer_port = sin.sin_port;
		}

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

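/* A candidate local address is usable only if this netns actually owns it,
 * i.e. inet_addr_type() resolves it as RTN_LOCAL.
 */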
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
	if (inet_addr_type(net, addr) == RTN_LOCAL)
		return 0;
	return -EADDRNOTAVAIL;
}

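/* Allocate one rds_tcp_connection per connection path up front; this
 * transport is t_mp_capable, so each of the RDS_MPATH_WORKERS paths gets
 * its own transport_data.
 */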
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;
	int i;

	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
		if (!tc)
			return -ENOMEM;

		mutex_init(&tc->t_conn_path_lock);
		tc->t_sock = NULL;
		tc->t_tinc = NULL;
		tc->t_tinc_hdr_rem = sizeof(struct rds_header);
		tc->t_tinc_data_rem = 0;

		conn->c_path[i].cp_transport_data = tc;
		tc->t_cpath = &conn->c_path[i];

		spin_lock_irq(&rds_tcp_conn_lock);
		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
		spin_unlock_irq(&rds_tcp_conn_lock);
		rdsdebug("rds_conn_path [%d] tc %p\n", i,
			 conn->c_path[i].cp_transport_data);
	}

	return 0;
}

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	unsigned long flags;

	rdsdebug("freeing tc %p\n", tc);

	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
	list_del(&tc->t_tcp_node);
	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

	kmem_cache_free(rds_tcp_conn_slab, tc);
}

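/* Tell the caller whether any path of @conn is already queued on @list, so
 * that teardown moves only one tc per connection and rds_conn_destroy()
 * runs exactly once per connection.
 */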
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc, *_tc;

	list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
		if (tc->t_cpath->cp_conn == conn)
			return true;
	}
	return false;
}

static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
			list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
	.laddr_check		= rds_tcp_laddr_check,
	.xmit_path_prepare	= rds_tcp_xmit_path_prepare,
	.xmit_path_complete	= rds_tcp_xmit_path_complete,
	.xmit			= rds_tcp_xmit,
	.recv_path		= rds_tcp_recv_path,
	.conn_alloc		= rds_tcp_conn_alloc,
	.conn_free		= rds_tcp_conn_free,
	.conn_path_connect	= rds_tcp_conn_path_connect,
	.conn_path_shutdown	= rds_tcp_conn_path_shutdown,
	.inc_copy_to_user	= rds_tcp_inc_copy_to_user,
	.inc_free		= rds_tcp_inc_free,
	.stats_info_copy	= rds_tcp_stats_info_copy,
	.exit			= rds_tcp_exit,
	.t_owner		= THIS_MODULE,
	.t_name			= "tcp",
	.t_type			= RDS_TRANS_TCP,
	.t_prefer_loopback	= 1,
	.t_mp_capable		= 1,
};
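
/* .t_mp_capable advertises multipath support to the RDS core, which is why
 * conn_alloc() above sets up all RDS_MPATH_WORKERS rds_tcp_connection
 * structs rather than just one.
 */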

static unsigned int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
	struct socket *rds_tcp_listen_sock;
	struct work_struct rds_tcp_accept_w;
	struct ctl_table_header *rds_tcp_sysctl;
	struct ctl_table *ctl_table;
	int sndbuf_size;
	int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rds_tcp_nonagle(sock);
	lock_sock(sk);
	if (rtn->sndbuf_size > 0) {
		sk->sk_sndbuf = rtn->sndbuf_size;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rtn->rcvbuf_size > 0) {
		sk->sk_rcvbuf = rtn->rcvbuf_size;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
	release_sock(sk);
}

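/* Drain the listen socket's accept queue; rds_tcp_accept_one() returns
 * nonzero once there are no more pending connections.
 */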
static void rds_tcp_accept_worker(struct work_struct *work)
{
	struct rds_tcp_net *rtn = container_of(work,
					       struct rds_tcp_net,
					       rds_tcp_accept_w);

	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
		cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct ctl_table *tbl;
	int err = 0;

	memset(rtn, 0, sizeof(*rtn));

	/* {snd, rcv}buf_size default to 0, which implies we let the
	 * stack pick the value, and permit auto-tuning of buffer size.
	 */
	if (net == &init_net) {
		tbl = rds_tcp_sysctl_table;
	} else {
		tbl = kmemdup(rds_tcp_sysctl_table,
			      sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
		if (!tbl) {
			pr_warn("could not allocate sysctl table\n");
			return -ENOMEM;
		}
		rtn->ctl_table = tbl;
	}
	tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
	tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
	rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
	if (!rtn->rds_tcp_sysctl) {
		pr_warn("could not register sysctl\n");
		err = -ENOMEM;
		goto fail;
	}
	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
	if (!rtn->rds_tcp_listen_sock) {
		pr_warn("could not set up listen sock\n");
		unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
		rtn->rds_tcp_sysctl = NULL;
		err = -EAFNOSUPPORT;
		goto fail;
	}
	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
	return 0;

fail:
	if (net != &init_net)
		kfree(tbl);
	return err;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	if (rtn->rds_tcp_sysctl)
		unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

	if (net != &init_net && rtn->ctl_table)
		kfree(rtn->ctl_table);

	/* If rds_tcp_exit_net() is called as a result of netns deletion,
	 * the rds_tcp_kill_sock() device notifier would already have cleaned
	 * up the listen socket, thus there is no work to do in this function.
	 *
	 * If rds_tcp_exit_net() is called as a result of module unload,
	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
	 * we do need to clean up the listen socket here.
	 */
	if (rtn->rds_tcp_listen_sock) {
		struct socket *lsock = rtn->rds_tcp_listen_sock;

		rtn->rds_tcp_listen_sock = NULL;
		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	}
}

static struct pernet_operations rds_tcp_net_ops = {
	.init = rds_tcp_init_net,
	.exit = rds_tcp_exit_net,
	.id = &rds_tcp_netid,
	.size = sizeof(struct rds_tcp_net),
};

/* explicitly send a RST on each socket, thereby releasing any socket refcnts
 * that may otherwise hold up netns deletion.
 */
static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
{
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;
	int i;
	struct sock *sk;

	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		cp = &conn->c_path[i];
		tc = cp->cp_transport_data;
		if (!tc->t_sock)
			continue;
		sk = tc->t_sock->sk;
		sk->sk_prot->disconnect(sk, 0);
		tcp_done(sk);
	}
}

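/* Called via the netdevice notifier when a netns's loopback device is
 * unregistered: stop the listener and tear down every RDS-TCP connection
 * belonging to that netns (see rds_tcp_dev_event() below).
 */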
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	rtn->rds_tcp_listen_sock = NULL;
	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = tc->t_cpath->cp_conn->c_net;

		if (net != c_net || !tc->t_sock)
			continue;
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
			list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
		rds_conn_destroy(tc->t_cpath->cp_conn);
	}
}

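/* Hand back the listen socket's original sk_data_ready, which the listen
 * setup path stashed in sk_user_data; NULL if the listener is already gone.
 */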
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	if (!lsock)
		return NULL;

	return lsock->sk->sk_user_data;
}

static int rds_tcp_dev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* rds-tcp registers as a pernet subsys, so the ->exit will only
	 * get invoked after network activity has quiesced. We need to
	 * clean up all sockets to quiesce network activity, and use
	 * the unregistration of the per-net loopback device as a trigger
	 * to start that cleanup.
	 */
	if (event == NETDEV_UNREGISTER_FINAL &&
	    dev->ifindex == LOOPBACK_IFINDEX)
		rds_tcp_kill_sock(dev_net(dev));

	return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
	.notifier_call        = rds_tcp_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters.  The assumption is that such reset
 * events are few and far-between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = tc->t_cpath->cp_conn->c_net;

		if (net != c_net || !tc->t_sock)
			continue;

		/* reconnect with new parameters */
		rds_conn_path_drop(tc->t_cpath, false);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
}

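/* proc handler behind both rds_tcp_sndbuf and rds_tcp_rcvbuf: validate the
 * value via proc_dointvec_minmax() (->extra1 supplies the floor), then on a
 * successful write drop this netns's connections so they come back up with
 * the new buffer sizes.
 */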
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *fpos)
{
	struct net *net = current->nsproxy->net_ns;
	int err;

	err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
	if (err < 0) {
		pr_warn("Invalid input. Must be >= %d\n",
			*(int *)(ctl->extra1));
		return err;
	}
	if (write)
		rds_tcp_sysctl_reset(net);
	return 0;
}

static void rds_tcp_exit(void)
{
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
	unregister_pernet_subsys(&rds_tcp_net_ops);
	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
		pr_warn("could not unregister rds_tcp_dev_notifier\n");
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
					      sizeof(struct rds_tcp_connection),
					      0, 0, NULL);
	if (!rds_tcp_conn_slab) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_slab;

	ret = register_pernet_subsys(&rds_tcp_net_ops);
	if (ret)
		goto out_recv;

	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
	if (ret) {
		pr_warn("could not register rds_tcp_dev_notifier\n");
		goto out_pernet;
	}

	rds_trans_register(&rds_tcp_transport);

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

	goto out;

out_pernet:
	unregister_pernet_subsys(&rds_tcp_net_ops);
out_recv:
	rds_tcp_recv_exit();
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");