xref: /linux/net/rds/connection.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
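/* 12 hash bits give 4096 buckets; the MASK above folds a full 32-bit hash into that range. */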

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

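/*
 * Hash a (local, foreign) address pair into a bucket of rds_conn_hash.
 * The port arguments are passed as 0 because the hash covers only the
 * address pair; rds_hash_secret is initialised once, on first use, so
 * the bucket layout is not predictable across boots.
 */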
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
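/*
 * For example, the call in rds_conn_info_visitor() below,
 *
 *	rds_conn_info_set(cinfo->flags,
 *			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
 *			  CONNECTED);
 *
 * sets RDS_INFO_CONNECTION_FLAG_CONNECTED in cinfo->flags only when the
 * path state is RDS_CONN_UP.
 */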

/* The RCU read lock or the connection spinlock must be held. */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI4 to %pI4 reset\n",
	  &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

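/*
 * Initialise one rds_conn_path: its lock, send/retransmit queues, work
 * items and the initial RDS_CONN_DOWN state.  Called for every path of a
 * new connection from __rds_conn_create() below.
 */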
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	/* cp_outgoing is per-path. So we can only set it here
	 * for the single-path transports.
	 */
	if (!conn->c_trans->t_mp_capable)
		cp->cp_outgoing = (is_outgoing ? 1 : 0);
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather have the
	 * messages flow through loopback than through either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
	  conn, &laddr, &faddr,
	  trans->t_name ? trans->t_name : "[unknown]",
	  is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we roll back and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resources for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

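/*
 * A minimal usage sketch (illustrative only - the real callers live in
 * the transports and in the send/receive paths):
 *
 *	conn = rds_conn_create(net, laddr, faddr, trans, GFP_KERNEL);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * On success every caller asking for the same (net, laddr, faddr,
 * transport) tuple gets the same connection back.
 */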
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

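/*
 * Tear one path down.  This runs from the shutdown work queued on
 * cp_down_w (see rds_conn_path_drop() below); if the connection is still
 * hashed afterwards, a reconnect is queued so the path can come back up.
 */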
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection management handlers before we start
		 * tearing things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may deadlock
		 * with the CM handler. Instead, the CM event handler is
		 * supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of
			 * tearing down the connection, and someone unloads
			 * the RDS module. Quite reproducible with loopback
			 * connections. Mostly harmless.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

/* Destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy().
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	rds_conn_path_drop(cp);
	flush_work(&cp->cp_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that
 * once the conn has been shut down, no one else is referencing the
 * connection.  We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	put_net(conn->c_net);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

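/*
 * rds-info helper: walk every connection (and, for multipath-capable
 * transports, every path) and copy either the send queue or the
 * retransmit queue out through the info iterator.
 */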
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						rds_inc_info_copy(&rm->m_inc,
								  iter,
								  conn->c_laddr,
								  conn->c_faddr,
								  0);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
				if (!conn->c_trans->t_mp_capable)
					break;
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

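/*
 * Walk every rds_connection and hand each one to @visitor along with a
 * scratch buffer of @item_len bytes.  Entries are copied out only while
 * they still fit in the caller's buffer, but lens->nr always counts
 * everything so userspace can resize its buffer and retry.
 */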
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

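/*
 * Per-path variant of the walk above: multipath-capable transports get a
 * visitor call for every path, all other transports only for path 0.
 */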
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    size_t item_len)
{
	u64  buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;
	int j;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			for (j = 0; j < RDS_MPATH_WORKERS; j++) {
				cp = &conn->c_path[j];

				/* XXX no cp_lock usage.. */
				if (!visitor(cp, buffer))
					continue;
				if (!conn->c_trans->t_mp_capable)
					break;
			}

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}

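/*
 * Fill one struct rds_info_connection for a single path; this is the
 * visitor used by rds_conn_info() below.
 */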
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = cp->cp_conn->c_laddr;
	cinfo->faddr = cp->cp_conn->c_faddr;
	strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

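/*
 * Module init/exit: create the connection slab and (de)register the
 * rds-info handlers for connections and message queues.
 */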
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect.
 */
void rds_conn_path_drop(struct rds_conn_path *cp)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect.  We may, however, have
 * scheduled a delayed reconnect - in that case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

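/*
 * Log the formatted message and drop the path.  Callers normally go
 * through the rds_conn_path_error() wrapper defined in rds.h.
 */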
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp);
}