/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <net/addrconf.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}
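
/* A sketch of the credit scheme, assuming the IB_*_CREDITS helpers in ib.h:
 * ic->i_credits packs two counters into one atomic, with the send credits
 * granted by the peer in the low 16 bits and the receive buffers we have
 * posted but not yet advertised in the high 16 bits, roughly:
 *
 *	advertised = IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits));
 *	unadvertised = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
 *
 * rds_ib_send_add_credits() above feeds the peer's advertisement into the
 * low half.
 */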

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	const union rds_ib_conn_priv *dp = NULL;
	struct ib_qp_attr qp_attr;
	__be64 ack_seq = 0;
	__be32 credit = 0;
	u8 major = 0;
	u8 minor = 0;
	int err;

	dp = event->param.conn.private_data;
	if (conn->c_isv6) {
		if (event->param.conn.private_data_len >=
		    sizeof(struct rds6_ib_connect_private)) {
			major = dp->ricp_v6.dp_protocol_major;
			minor = dp->ricp_v6.dp_protocol_minor;
			credit = dp->ricp_v6.dp_credit;
			/* The start of the dp structure is not guaranteed to
			 * be 8-byte aligned.  Since dp_ack_seq is 64 bits,
			 * extended load operations could be used, so go
			 * through get_unaligned to avoid unaligned access
			 * faults.
			 */
			ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq);
		}
	} else if (event->param.conn.private_data_len >=
		   sizeof(struct rds_ib_connect_private)) {
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		credit = dp->ricp_v4.dp_credit;
		ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq);
	}

	/* make sure it isn't empty data */
	if (major) {
		rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor));
		rds_ib_set_flow_control(conn, be32_to_cpu(credit));
	}

	if (conn->c_version < RDS_PROTOCOL_VERSION) {
		if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
			pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
				  &conn->c_laddr, &conn->c_faddr,
				  RDS_PROTOCOL_MAJOR(conn->c_version),
				  RDS_PROTOCOL_MINOR(conn->c_version));
			rds_conn_destroy(conn);
			return;
		}
	}

	pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
		  ic->i_active_side ? "Active" : "Passive",
		  &conn->c_laddr, &conn->c_faddr, conn->c_tos,
		  RDS_PROTOCOL_MAJOR(conn->c_version),
		  RDS_PROTOCOL_MINOR(conn->c_version),
		  ic->i_flowctl ? ", flow control" : "");

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv.  This needs to wait until protocol
	 * negotiation is complete, since ring layout differs between
	 * 3.1 and 4.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
			err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		if (ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(ack_seq),
					    NULL);
	}

	conn->c_proposed_version = conn->c_version;
	rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      union rds_ib_conn_priv *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth,
				      bool isv6)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;
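	/* Both retry fields are 3-bit quantities on the wire, so 7 is the
	 * ceiling; for rnr_retry_count the IB spec reads 7 as "retry
	 * forever" - the "smallest infinite number" joked about above
	 * rds_ib_tune_rnr(). */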

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		if (isv6) {
			dp->ricp_v6.dp_saddr = conn->c_laddr;
			dp->ricp_v6.dp_daddr = conn->c_faddr;
			dp->ricp_v6.dp_protocol_major =
			    RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor =
			    RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor_mask =
			    cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v6.dp_ack_seq =
			    cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v6;
			conn_param->private_data_len = sizeof(dp->ricp_v6);
		} else {
			dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
			dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
			dp->ricp_v4.dp_protocol_major =
			    RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor =
			    RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor_mask =
			    cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v4.dp_ack_seq =
			    cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v4;
			conn_param->private_data_len = sizeof(dp->ricp_v4);
		}

		/* Advertise flow control */
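		/* The post credits pulled out below are the receive buffers
		 * posted since the last advertisement; subtracting
		 * IB_SET_POST_CREDITS(credits) afterwards moves them out of
		 * the high half of ic->i_credits so they are not advertised
		 * twice (a reading of the IB_*_CREDITS helpers in ib.h). */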
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS
				(atomic_read(&ic->i_credits));
			if (isv6)
				dp->ricp_v6.dp_credit = cpu_to_be32(credits);
			else
				dp->ricp_v4.dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits),
				   &ic->i_credits);
		}
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

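			/* wr_ids at or below the send ring size index a send
			 * ring entry (with RDS_IB_ACK_WR_ID tagging the
			 * single ack WR); anything larger is taken to be a
			 * fast registration work request. */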
			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}

static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

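	/* Poll, re-arm, then poll again: a completion that lands between
	 * draining the CQ and re-arming it raises no event, so the second
	 * poll sweeps up that window. */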
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

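	/* poll_rcq() folded per-completion ack state into one summary:
	 * ack_next is the sequence we should ack back to the peer, while
	 * ack_recv is the newest ack the peer piggybacked to us, letting
	 * us drop locally queued messages it has already seen. */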
	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;
	return index;
}
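
/* Example: with three completion vectors and vector_load = {2, 0, 1}, the
 * scan above settles on index 1 and bumps its load to 1.  The walk runs
 * from the highest vector down with a strict compare, so ties go to the
 * higher-numbered vector.
 */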

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * the completion queue and send queue.  This extra space is used for
	 * FRMR registration and invalidation work requests.
	 */
	fr_queue_space = rds_ibdev->use_fastreg ?
			 (RDS_IB_DEFAULT_FR_WR + 1) +
			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
			 : 0;
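	/* With the ib.h defaults (assuming RDS_IB_DEFAULT_FR_WR and
	 * RDS_IB_DEFAULT_FR_INV_WR are both 256) this works out to 514
	 * entries - one spare slot per WR type on top of the 512 the
	 * comment above mentions. */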

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto qp_out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto send_hdrs_dma_out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto recv_hdrs_dma_out;
	}

	ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work),
					      ic->i_send_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work),
					      ic->i_recv_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	goto out;

sends_out:
	vfree(ic->i_sends);
ack_dma_out:
	ib_dma_free_coherent(dev, sizeof(struct rds_header),
			     ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
					sizeof(struct rds_header),
					ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
					sizeof(struct rds_header),
					ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	if (!ib_destroy_cq(ic->i_recv_cq))
		ic->i_recv_cq = NULL;
send_cq_out:
	if (!ib_destroy_cq(ic->i_send_cq))
		ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);
out:
	rds_ib_dev_put(rds_ibdev);

	return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
{
	const union rds_ib_conn_priv *dp = event->param.conn.private_data;
	u8 data_len, major, minor;
	u32 version = 0;
	__be16 mask;
	u16 common;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without being told
	 * the original size.  The only way to tell the difference is by
	 * looking at the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection
	 * attempt from an older version.  This could be 3.0 or 2.0 - we
	 * can't tell.  We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
			"rejecting\n");
		return 0;
	}

	if (isv6) {
		data_len = sizeof(struct rds6_ib_connect_private);
		major = dp->ricp_v6.dp_protocol_major;
		minor = dp->ricp_v6.dp_protocol_minor;
		mask = dp->ricp_v6.dp_protocol_minor_mask;
	} else {
		data_len = sizeof(struct rds_ib_connect_private);
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		mask = dp->ricp_v4.dp_protocol_minor_mask;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < data_len || major == 0)
		return RDS_PROTOCOL_4_0;

	common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
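	/* Example: a peer minor mask of 0x0003 (minors 0 and 1) that fully
	 * intersects ours gives common = 0x0003; the loop below then steps
	 * version from RDS_PROTOCOL_4_0 up to 4.1, the highest common
	 * minor (assuming RDS_PROTOCOL() packs major in the high byte and
	 * minor in the low byte). */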
	if (major == 4 && common) {
		version = RDS_PROTOCOL_4_0;
		while ((common >>= 1) != 0)
			version++;
	} else if (RDS_PROTOCOL_COMPAT_VERSION ==
		   RDS_PROTOCOL(major, minor)) {
		version = RDS_PROTOCOL_COMPAT_VERSION;
	} else {
		if (isv6)
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
					   &dp->ricp_v6.dp_saddr, major, minor);
		else
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
					   &dp->ricp_v4.dp_saddr, major, minor);
	}
	return version;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Given an IPv6 address, find the net_device which hosts that address and
 * return its index.  This is used by the rds_ib_cm_handle_connect() code to
 * find the interface index of where an incoming request comes from when
 * the request is using a link local address.
 *
 * Note one problem in this search.  It is possible that two interfaces have
 * the same link local address.  Unfortunately, this cannot be solved unless
 * the underlying layer gives us the interface which an incoming RDMA connect
 * request comes from.
 */
static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr)
{
	struct net_device *dev;
	int idx = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (ipv6_chk_addr(net, addr, dev, 1)) {
			idx = dev->ifindex;
			break;
		}
	}
	rcu_read_unlock();

	return idx;
}
#endif

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_conn_priv_cmn *dp_cmn;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	const union rds_ib_conn_priv *dp;
	union rds_ib_conn_priv dp_rep;
	struct in6_addr s_mapped_addr;
	struct in6_addr d_mapped_addr;
	const struct in6_addr *saddr6;
	const struct in6_addr *daddr6;
	int destroy = 1;
	u32 ifindex = 0;
	u32 version;
	int err = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event, isv6);
	if (!version) {
		err = RDS_RDMA_REJ_INCOMPAT;
		goto out;
	}

	dp = event->param.conn.private_data;
	if (isv6) {
#if IS_ENABLED(CONFIG_IPV6)
		dp_cmn = &dp->ricp_v6.dp_cmn;
		saddr6 = &dp->ricp_v6.dp_saddr;
		daddr6 = &dp->ricp_v6.dp_daddr;
		/* If either address is link local, need to find the
		 * interface index in order to create a proper RDS
		 * connection.
		 */
		if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Using init_net for now ..  */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found...  Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		} else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Use our address to find the correct index. */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found...  Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		}
#else
		err = -EOPNOTSUPP;
		goto out;
#endif
	} else {
		dp_cmn = &dp->ricp_v4.dp_cmn;
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr);
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr);
		saddr6 = &s_mapped_addr;
		daddr6 = &d_mapped_addr;
	}

	rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
		 saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
		 RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, daddr6, saddr6,
			       &rds_ib_transport, dp_cmn->ricpc_dp_toss,
			       GFP_KERNEL, ifindex);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp_cmn->ricpc_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
				    NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth, isv6);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	if (rdma_accept(cm_id, &conn_param))
		rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, &err, sizeof(int));
	return destroy;
}

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	union rds_ib_conn_priv dp;
	int ret;

	/* Propose the highest protocol version we support (RDSv4.1 here);
	 * if the peer negotiates a lower one, rds_ib_cm_connect_complete()
	 * will drop us back to it. */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
				  conn->c_proposed_version,
				  UINT_MAX, UINT_MAX, isv6);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct sockaddr_storage src, dest;
	rdma_cm_event_handler handler;
	struct rds_ib_connection *ic;
	int ret;

	ic = conn->c_transport_data;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
#if IS_ENABLED(CONFIG_IPV6)
	if (conn->c_isv6)
		handler = rds6_rdma_cm_event_handler;
	else
#endif
		handler = rds_rdma_cm_event_handler;
	ic->i_cm_id = rdma_create_id(&init_net, handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

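	/* Fill in the local (source, port 0 so it stays unbound) and remote
	 * addresses.  Note the IPv4 path dials RDS_PORT while IPv6 uses
	 * RDS_CM_PORT - presumably the historical port is kept for wire
	 * compatibility with older IPv4-only peers. */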
	if (ipv6_addr_v4mapped(&conn->c_faddr)) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&src;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
		sin->sin_port = 0;

		sin = (struct sockaddr_in *)&dest;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
		sin->sin_port = htons(RDS_PORT);
	} else {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&src;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_laddr;
		sin6->sin6_port = 0;
		sin6->sin6_scope_id = conn->c_dev_if;

		sin6 = (struct sockaddr_in6 *)&dest;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_faddr;
		sin6->sin6_port = htons(RDS_CM_PORT);
		sin6->sin6_scope_id = conn->c_dev_if;
	}

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shut down new
		 * sends before getting here, so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		atomic_set(&ic->i_cq_quiesce, 1);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_send_hdrs,
					   ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_recv_hdrs,
					   ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic, gfp);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t	*lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}