1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/ib/ibtl/impl/ibtl.h>
26 #include <sys/ib/ibtl/impl/ibtl_cm.h>
27 
28 /*
29  * ibtl_qp.c
30  *	These routines implement (most of) the verbs related to
31  *	Queue Pairs.
32  */
33 
34 /* Globals. */
35 static char ibtf_qp[] = "ibtl";
36 
37 /* The table below indirectly initializes the ibt_cep_next_state[] table. */
38 typedef struct ibt_cep_next_state_s {
39 	ibt_cep_state_t		next_state;
40 	ibt_cep_modify_flags_t	modify_flags;
41 } ibt_cep_next_state_t;
42 
43 struct	{
44 	ibt_cep_state_t		current_state;
45 	ibt_cep_state_t		next_state;
46 	ibt_cep_modify_flags_t	modify_flags;
47 } ibt_cep_next_state_inits[] = {
48 	{ IBT_STATE_RESET, IBT_STATE_INIT, IBT_CEP_SET_RESET_INIT},
49 	{ IBT_STATE_INIT, IBT_STATE_RTR, IBT_CEP_SET_INIT_RTR},
50 	{ IBT_STATE_RTR, IBT_STATE_RTS, IBT_CEP_SET_RTR_RTS}
51 };
52 
53 ibt_cep_next_state_t ibt_cep_next_state[IBT_STATE_NUM];
54 
55 _NOTE(SCHEME_PROTECTS_DATA("unique", ibt_cep_next_state))
56 
57 /* These data and functions limit concurrent QP verb calls for stability. */
58 
59 int ibtl_qp_calls_curr;
60 int ibtl_qp_calls_max = 128;	/* limit on # of simultaneous QP verb calls */
61 kmutex_t ibtl_qp_mutex;
62 kcondvar_t ibtl_qp_cv;
63 
64 void
65 ibtl_qp_flow_control_enter(void)
66 {
67 	mutex_enter(&ibtl_qp_mutex);
68 	while (ibtl_qp_calls_curr >= ibtl_qp_calls_max) {
69 		cv_wait(&ibtl_qp_cv, &ibtl_qp_mutex);
70 	}
71 	++ibtl_qp_calls_curr;
72 	mutex_exit(&ibtl_qp_mutex);
73 }
74 
75 void
76 ibtl_qp_flow_control_exit(void)
77 {
78 	mutex_enter(&ibtl_qp_mutex);
79 	cv_signal(&ibtl_qp_cv);
80 	--ibtl_qp_calls_curr;
81 	mutex_exit(&ibtl_qp_mutex);
82 }
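
/*
 * Editorial example (not part of the original file): every QP verb below
 * brackets its CI call with the flow-control pair above, so that at most
 * ibtl_qp_calls_max calls are outstanding at once.  The body here is a
 * placeholder; no real CI operation is named.
 */
#if 0	/* illustrative sketch only */
static ibt_status_t
ibtl_example_throttled_call(void)
{
	ibt_status_t	status;

	ibtl_qp_flow_control_enter();	/* may block until a slot frees up */
	status = IBT_SUCCESS;		/* the wrapped CI call would go here */
	ibtl_qp_flow_control_exit();	/* release the slot, wake one waiter */

	return (status);
}
#endif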
83 
84 /*
85  * Function:
86  *	ibt_alloc_qp
87  * Input:
88  *	hca_hdl		HCA Handle.
89  *	type		Specifies the type of QP to allocate.
90  *	qp_attrp	Specifies the ibt_qp_alloc_attr_t attributes needed to
91  *			allocate a QP and later transition it to the RTS state
92  *			for UDs and the INIT state for all other QPs.
93  * Output:
94  *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
95  *			WR SGL elements.
96  *	qpn_p		Returned QP Number of the allocated QP.
97  *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
98  * Returns:
99  *	IBT_SUCCESS
100  * Description:
101  *	Allocate a QP with specified attributes.
102  */
103 ibt_status_t
104 ibt_alloc_qp(ibt_hca_hdl_t hca_hdl, ibt_qp_type_t type,
105     ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
106     ib_qpn_t *qpn_p, ibt_qp_hdl_t *ibt_qp_p)
107 {
108 	ibt_status_t		retval;
109 	ibtl_channel_t		*chanp;
110 	ibt_tran_srv_t		qp_type;
111 
112 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_qp(%p, %d, %p, %p, %p, %p) ",
113 	    hca_hdl, type, qp_attrp, queue_sizes_p, qpn_p, ibt_qp_p);
114 
115 	switch (type) {
116 	case IBT_UD_RQP:
117 		qp_type = IBT_UD_SRV;
118 		break;
119 	case IBT_RC_RQP:
120 		qp_type = IBT_RC_SRV;
121 		break;
122 	case IBT_UC_RQP:
123 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Unreliable Connected "
124 		    "Transport Type is not supported.");
125 		*ibt_qp_p = NULL;
126 		return (IBT_NOT_SUPPORTED);
127 	case IBT_RD_RQP:
128 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Reliable Datagram "
129 		    "Transport Type is not supported.");
130 		*ibt_qp_p = NULL;
131 		return (IBT_NOT_SUPPORTED);
132 	default:
133 		/* Shouldn't happen: illegal transport type. */
134 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Illegal Transport Type "
135 		    "%d", type);
136 		*ibt_qp_p = NULL;
137 		return (IBT_QP_SRV_TYPE_INVALID);
138 	}
139 
140 	/* Get CI CQ handles */
141 	qp_attrp->qp_ibc_scq_hdl = (qp_attrp->qp_scq_hdl == NULL) ? NULL :
142 	    qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
143 	qp_attrp->qp_ibc_rcq_hdl = (qp_attrp->qp_rcq_hdl == NULL) ? NULL :
144 	    qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;
145 
146 	/* Get CI SRQ handle */
147 	if ((qp_attrp->qp_alloc_flags & IBT_QP_USES_SRQ) &&
148 	    (qp_attrp->qp_srq_hdl != NULL))
149 		qp_attrp->qp_ibc_srq_hdl =
150 		    qp_attrp->qp_srq_hdl->srq_ibc_srq_hdl;
151 	else
152 		qp_attrp->qp_ibc_srq_hdl = NULL;
153 
154 	/* Allocate Channel structure */
155 	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);
156 
157 	ibtl_qp_flow_control_enter();
158 	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_qp)(
159 	    IBTL_HCA2CIHCA(hca_hdl), &chanp->ch_qp, type, qp_attrp,
160 	    queue_sizes_p, qpn_p, &chanp->ch_qp.qp_ibc_qp_hdl);
161 	ibtl_qp_flow_control_exit();
162 	if (retval != IBT_SUCCESS) {
163 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: "
164 		    "Failed to allocate QP: %d", retval);
165 		kmem_free(chanp, sizeof (*chanp));
166 		*ibt_qp_p = NULL;
167 		return (retval);
168 	}
169 
170 	/* Initialize the internal QP struct. */
171 	chanp->ch_qp.qp_type = qp_type;
172 	chanp->ch_qp.qp_hca = hca_hdl;
173 	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
174 	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
175 	chanp->ch_current_state = IBT_STATE_RESET;
176 	/*
177 	 * The IBTA spec does not include the signal type or PD on a QP
178 	 * query operation. In order to implement the "CLONE" feature,
179 	 * we cache these values here; they are used mostly by TI clients.
180 	 */
181 	chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
182 	chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;
183 	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
184 	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);
185 
186 	atomic_inc_32(&hca_hdl->ha_qp_cnt);
187 
188 	IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: SUCCESS: qp %p owned by '%s'",
189 	    chanp, hca_hdl->ha_clnt_devp->clnt_name);
190 
191 	*ibt_qp_p = chanp;
192 
193 	return (retval);
194 }
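
/*
 * Editorial example (not part of the original file): a minimal client-side
 * sketch of allocating a UD QP with ibt_alloc_qp().  The CQ and PD handles
 * are assumed to have been allocated by the client beforehand; all other
 * ibt_qp_alloc_attr_t fields are left at their zeroed defaults.
 */
#if 0	/* illustrative sketch only */
static ibt_status_t
example_alloc_ud_qp(ibt_hca_hdl_t hca_hdl, ibt_cq_hdl_t scq, ibt_cq_hdl_t rcq,
    ibt_pd_hdl_t pd, ibt_qp_hdl_t *qp_p)
{
	ibt_qp_alloc_attr_t	attrs;
	ibt_chan_sizes_t	sizes;
	ib_qpn_t		qpn;

	bzero(&attrs, sizeof (attrs));
	attrs.qp_scq_hdl = scq;		/* send completion queue */
	attrs.qp_rcq_hdl = rcq;		/* receive completion queue */
	attrs.qp_pd_hdl = pd;		/* protection domain */

	/* The new QP is returned in the RESET state. */
	return (ibt_alloc_qp(hca_hdl, IBT_UD_RQP, &attrs, &sizes, &qpn, qp_p));
}
#endif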
195 
196 
197 /*
198  * Function:
199  *	ibt_initialize_qp
200  * Input:
201  *	ibt_qp		The previously allocated IBT QP Handle.
202  *	modify_attrp	Specifies the QP Modify attributes needed to transition
203  *			the QP to the RTS state for UDs (including special QPs)
204  *			and INIT state for all other QPs.
205  * Output:
206  *	none.
207  * Returns:
208  *	IBT_SUCCESS
209  * Description:
210  *	Transition the QP to the RTS state for UDs (including special QPs)
211  *	and INIT state for all other QPs.
212  */
213 ibt_status_t
214 ibt_initialize_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_info_t *modify_attrp)
215 {
216 	ibt_status_t		status;
217 	ibt_cep_state_t		state;
218 	ibc_hca_hdl_t		ibc_hca_hdl = IBTL_CHAN2CIHCA(ibt_qp);
219 	ibc_qp_hdl_t		ibc_qp_hdl = IBTL_CHAN2CIQP(ibt_qp);
220 	ibc_operations_t	*hca_ops_p = IBTL_CHAN2CIHCAOPS_P(ibt_qp);
221 	ibt_cep_modify_flags_t	modify_flags;
222 
223 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp(%p, %p)",
224 	    ibt_qp, modify_attrp);
225 
226 	/*
227 	 * Validate the QP Type from the channel with QP Type from the
228 	 * modify attribute struct.
229 	 */
230 	if (ibt_qp->ch_qp.qp_type != modify_attrp->qp_trans) {
231 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
232 		    "QP Type mismatch: Chan QP Type<%d>, Modify QP Type<%d>",
233 		    ibt_qp->ch_qp.qp_type, modify_attrp->qp_trans);
234 		return (IBT_QP_SRV_TYPE_INVALID);
235 	}
236 	if (ibt_qp->ch_current_state != IBT_STATE_RESET) {
237 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
238 		    "QP needs to be in RESET state: Chan QP State<%d>",
239 		    ibt_qp->ch_current_state);
240 		return (IBT_CHAN_STATE_INVALID);
241 	}
242 
243 	/*
244 	 * Initialize the QP to the RTS state for UDs
245 	 * and INIT state for all other QPs.
246 	 */
247 	switch (modify_attrp->qp_trans) {
248 	case IBT_UD_SRV:
249 
250 		/*
251 		 * Bring the QP to the RTS state.
252 		 */
253 		state = IBT_STATE_RESET;
254 		ibtl_qp_flow_control_enter();
255 		do {
256 			modify_attrp->qp_current_state = state;
257 			modify_flags = ibt_cep_next_state[state].modify_flags;
258 			modify_attrp->qp_state = state =
259 			    ibt_cep_next_state[state].next_state;
260 
261 			IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp: "
262 			    "modifying qp state to 0x%x", state);
263 			status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl,
264 			    ibc_qp_hdl, modify_flags, modify_attrp, NULL);
265 		} while ((state != IBT_STATE_RTS) && (status == IBT_SUCCESS));
266 		ibtl_qp_flow_control_exit();
267 
268 		if (status == IBT_SUCCESS) {
269 			ibt_qp->ch_current_state = state;
270 			ibt_qp->ch_transport.ud.ud_port_num =
271 			    modify_attrp->qp_transport.ud.ud_port;
272 			ibt_qp->ch_transport.ud.ud_qkey =
273 			    modify_attrp->qp_transport.ud.ud_qkey;
274 		}
275 		break;
276 	case IBT_UC_SRV:
277 	case IBT_RD_SRV:
278 	case IBT_RC_SRV:
279 
280 		/*
281 		 * Bring the QP to the INIT state.
282 		 */
283 		modify_attrp->qp_state = IBT_STATE_INIT;
284 
285 		ibtl_qp_flow_control_enter();
286 		status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl, ibc_qp_hdl,
287 		    IBT_CEP_SET_RESET_INIT, modify_attrp, NULL);
288 		ibtl_qp_flow_control_exit();
289 		if (status == IBT_SUCCESS)
290 			ibt_qp->ch_current_state = IBT_STATE_INIT;
291 		break;
292 	default:
293 		/* Shouldn't happen: illegal transport type. */
294 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: Illegal Type %d",
295 		    modify_attrp->qp_trans);
296 		return (IBT_QP_SRV_TYPE_INVALID);
297 	} /* End switch */
298 
299 	return (status);
300 }
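
/*
 * Editorial example (not part of the original file): a sketch of taking a
 * freshly allocated UD QP from RESET to RTS with ibt_initialize_qp(),
 * which walks the ibt_cep_next_state[] table above.  The port, P_Key
 * index and Q_Key values are placeholders.
 */
#if 0	/* illustrative sketch only */
static ibt_status_t
example_init_ud_qp(ibt_qp_hdl_t ud_qp)
{
	ibt_qp_info_t	qp_info;

	bzero(&qp_info, sizeof (qp_info));
	qp_info.qp_trans = IBT_UD_SRV;			/* must match QP type */
	qp_info.qp_transport.ud.ud_port = 1;		/* placeholder port */
	qp_info.qp_transport.ud.ud_pkey_ix = 0;		/* placeholder P_Key */
	qp_info.qp_transport.ud.ud_qkey = 0x12345678;	/* placeholder Q_Key */

	return (ibt_initialize_qp(ud_qp, &qp_info));
}
#endif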
301 
302 
303 /*
304  * Function:
305  *	ibt_alloc_special_qp
306  * Input:
307  *	hca_hdl		HCA Handle.
308  *	port		HCA port on which to allocate the special QP.
309  *	type		Specifies the type of Special QP to be allocated.
310  *	qp_attrp	Specifies the ibt_qp_alloc_attr_t attributes needed to
311  *			allocate a special QP.
312  * Output:
313  *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
314  *			WR SGL elements.
315  *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
316  * Returns:
317  *	IBT_SUCCESS
318  * Description:
319  *	Allocate a special QP with specified attributes.
320  */
321 ibt_status_t
322 ibt_alloc_special_qp(ibt_hca_hdl_t hca_hdl, uint8_t port, ibt_sqp_type_t type,
323     ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
324     ibt_qp_hdl_t *ibt_qp_p)
325 {
326 	ibt_qp_hdl_t	chanp;
327 	ibt_status_t	retval;
328 	ibt_tran_srv_t	sqp_type;
329 
330 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_special_qp(%p, %d, %x, %p, %p, %p)",
331 	    hca_hdl, port, type, qp_attrp, queue_sizes_p, ibt_qp_p);
332 
333 	switch (type) {
334 	case IBT_SMI_SQP:
335 	case IBT_GSI_SQP:
336 		sqp_type = IBT_UD_SRV;
337 		break;
338 
339 	case IBT_RAWIP_SQP:
340 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw IP "
341 		    "Transport Type is not supported.");
342 		*ibt_qp_p = NULL;
343 		return (IBT_NOT_SUPPORTED);
344 
345 	case IBT_RAWETHER_SQP:
346 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw Ethernet "
347 		    "Transport Type is not supported.");
348 		*ibt_qp_p = NULL;
349 		return (IBT_NOT_SUPPORTED);
350 
351 	default:
352 		/* Shouldn't happen */
353 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
354 		    "Illegal Type 0x%x", type);
355 		*ibt_qp_p = NULL;
356 		return (IBT_QP_SPECIAL_TYPE_INVALID);
357 	}
358 
359 	/* convert the CQ handles for the CI */
360 	qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
361 	qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;
362 
363 	/* Allocate Channel structure */
364 	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);
365 
366 	ibtl_qp_flow_control_enter();
367 	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_special_qp)(
368 	    IBTL_HCA2CIHCA(hca_hdl), port, &chanp->ch_qp, type, qp_attrp,
369 	    queue_sizes_p, &chanp->ch_qp.qp_ibc_qp_hdl);
370 	ibtl_qp_flow_control_exit();
371 	if (retval != IBT_SUCCESS) {
372 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
373 		    "Failed to allocate Special QP: %d", retval);
374 		kmem_free(chanp, sizeof (*chanp));
375 		*ibt_qp_p = NULL;
376 		return (retval);
377 	}
378 
379 	/* Initialize the internal QP struct. */
380 	chanp->ch_qp.qp_type = sqp_type;
381 	chanp->ch_qp.qp_hca = hca_hdl;
382 	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
383 	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
384 	chanp->ch_current_state = IBT_STATE_RESET;
385 	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
386 	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);
387 
388 	/* Update these variables so that the debugger shows correct values. */
389 	chanp->ch_qp.qp_flags = qp_attrp->qp_flags;
390 	chanp->ch_qp.qp_pd_hdl = qp_attrp->qp_pd_hdl;
391 
392 	atomic_inc_32(&hca_hdl->ha_qp_cnt);
393 
394 	*ibt_qp_p = chanp;
395 
396 	return (retval);
397 }
398 
399 
400 /*
401  * Function:
402  *	ibt_flush_qp
403  * Input:
404  *	ibt_qp		Handle for the QP that needs to be flushed.
405  * Output:
406  *	none.
407  * Returns:
408  *	IBT_SUCCESS
409  *	IBT_QP_HDL_INVALID
410  * Description:
411  *	Put the QP into error state to flush out work requests.
412  */
413 ibt_status_t
414 ibt_flush_qp(ibt_qp_hdl_t ibt_qp)
415 {
416 	ibt_qp_info_t		modify_attr;
417 	ibt_status_t		retval;
418 
419 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_flush_qp(%p)", ibt_qp);
420 
421 	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
422 		mutex_enter(&ibtl_free_qp_mutex);
423 		if ((ibt_qp->ch_transport.rc.rc_free_flags &
424 		    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
425 		    IBTL_RC_QP_CONNECTED) {
426 			mutex_exit(&ibtl_free_qp_mutex);
427 			IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp(%p): "
428 			    "called with a connected RC QP", ibt_qp);
429 			return (IBT_CHAN_STATE_INVALID);
430 		}
431 		mutex_exit(&ibtl_free_qp_mutex);
432 	}
433 
434 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
435 
436 	/*
437 	 * Set the QP state to error to flush any uncompleted WRs.
438 	 */
439 	modify_attr.qp_state = IBT_STATE_ERROR;
440 	modify_attr.qp_trans = ibt_qp->ch_qp.qp_type;
441 
442 	retval = ibt_modify_qp(ibt_qp, IBT_CEP_SET_STATE, &modify_attr, NULL);
443 
444 	if (retval != IBT_SUCCESS) {
445 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp: "
446 		    "failed on chan %p: %d", ibt_qp, retval);
447 	}
448 	return (retval);
449 }
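
/*
 * Editorial example (not part of the original file): ibt_flush_qp() only
 * moves the channel to the ERROR state; outstanding work requests then
 * complete on the CQs with a flush status.  A client typically flushes,
 * reaps those completions, and only then frees the channel.
 */
#if 0	/* illustrative sketch only */
static void
example_flush_then_free(ibt_qp_hdl_t qp)
{
	if (ibt_flush_qp(qp) == IBT_SUCCESS) {
		/* ... reap flushed completions from the send/recv CQs ... */
		(void) ibt_free_qp(qp);
	}
}
#endif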
450 
451 
452 /*
453  * ibtl_cm_chan_is_opening()
454  *
455  *	Inform IBTL that connection establishment is in progress on this
456  *	channel, so that care is taken when freeing it while the open is
457  *	NOT yet complete.
458  *
459  *	chan	Channel Handle
460  */
461 void
462 ibtl_cm_chan_is_opening(ibt_channel_hdl_t chan)
463 {
464 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_opening(%p)", chan);
465 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
466 	mutex_enter(&ibtl_free_qp_mutex);
467 	ASSERT(chan->ch_transport.rc.rc_free_flags == 0);
468 	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTING;
469 	mutex_exit(&ibtl_free_qp_mutex);
470 }
471 
472 /*
473  * ibtl_cm_chan_open_is_aborted()
474  *
475  *	Inform IBTL that connection establishment on this channel has been
476  *	aborted, so undo what was done in ibtl_cm_chan_is_opening().
477  *
478  *	chan	Channel Handle
479  */
480 void
481 ibtl_cm_chan_open_is_aborted(ibt_channel_hdl_t chan)
482 {
483 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_open_is_aborted(%p)", chan);
484 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
485 	mutex_enter(&ibtl_free_qp_mutex);
486 	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTING;
487 	mutex_exit(&ibtl_free_qp_mutex);
488 }
489 
490 /*
491  * ibtl_cm_chan_is_open()
492  *
493  *	Inform IBTL that the connection has been established on this
494  *	channel so that a later call to ibtl_cm_chan_is_closed()
495  *	will be required to free the QPN used by this channel.
496  *
497  *	chan	Channel Handle
498  */
499 void
500 ibtl_cm_chan_is_open(ibt_channel_hdl_t chan)
501 {
502 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_open(%p)", chan);
503 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
504 	mutex_enter(&ibtl_free_qp_mutex);
505 	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTING;
506 	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTED;
507 	mutex_exit(&ibtl_free_qp_mutex);
508 }
509 
510 /*
511  * ibtl_cm_is_chan_closing()
512  *
513  *	Returns 1 if the connection that was started for this
514  *	channel has moved to TIMEWAIT.
515  *	If not, returns 0.
516  *
517  *	chan	Channel Handle
518  */
519 int
520 ibtl_cm_is_chan_closing(ibt_channel_hdl_t chan)
521 {
522 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closing(%p)", chan);
523 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
524 	mutex_enter(&ibtl_free_qp_mutex);
525 	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSING) {
526 		mutex_exit(&ibtl_free_qp_mutex);
527 		return (1);
528 	}
529 	mutex_exit(&ibtl_free_qp_mutex);
530 	return (0);
531 }
532 
533 /*
534  * ibtl_cm_is_chan_closed()
535  *
536  *	Returns 1 if the connection that was started for this
537  *	channel has completed TIMEWAIT.
538  *	If not, returns 0.
539  *
540  *	chan	Channel Handle
541  */
542 int
543 ibtl_cm_is_chan_closed(ibt_channel_hdl_t chan)
544 {
545 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closed(%p)", chan);
546 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
547 	mutex_enter(&ibtl_free_qp_mutex);
548 	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSED) {
549 		mutex_exit(&ibtl_free_qp_mutex);
550 		return (1);
551 	}
552 	mutex_exit(&ibtl_free_qp_mutex);
553 	return (0);
554 }
555 /*
556  * ibtl_cm_chan_is_closing()
557  *
558  *	Inform IBTL that the TIMEWAIT delay for the connection has been
559  *	started for this channel so that the QP can be freed.
560  *
561  *	chan	Channel Handle
562  */
563 void
564 ibtl_cm_chan_is_closing(ibt_channel_hdl_t chan)
565 {
566 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closing(%p)", chan);
567 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
568 	mutex_enter(&ibtl_free_qp_mutex);
569 	ASSERT(chan->ch_transport.rc.rc_free_flags == IBTL_RC_QP_CONNECTED);
570 	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSING;
571 	mutex_exit(&ibtl_free_qp_mutex);
572 }
573 /*
574  * ibtl_cm_chan_is_closed()
575  *
576  *	Inform IBTL that the TIMEWAIT delay for the connection on this
577  *	channel has elapsed, so that the QPN can now be reused.
578  *
579  *	chan	Channel Handle
580  */
581 void
582 ibtl_cm_chan_is_closed(ibt_channel_hdl_t chan)
583 {
584 	ibt_status_t status;
585 	ibtl_hca_t *ibtl_hca = chan->ch_qp.qp_hca;
586 
587 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closed(%p)", chan);
588 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
589 	mutex_enter(&ibtl_free_qp_mutex);
590 	ASSERT((chan->ch_transport.rc.rc_free_flags &
591 	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
592 	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING));
593 
594 	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTED;
595 	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CLOSING;
596 	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSED;
597 
598 	ibtl_cm_set_chan_private(chan, NULL);
599 
600 	if ((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_FREED) == 0) {
601 		mutex_exit(&ibtl_free_qp_mutex);
602 		return;
603 	}
604 	mutex_exit(&ibtl_free_qp_mutex);
605 	ibtl_qp_flow_control_enter();
606 	if ((status = (IBTL_CHAN2CIHCAOPS_P(chan)->ibc_release_qpn)
607 	    (IBTL_CHAN2CIHCA(chan), chan->ch_transport.rc.rc_qpn_hdl)) ==
608 	    IBT_SUCCESS) {
609 		/* effectively, this is kmem_free(chan); */
610 		ibtl_free_qp_async_check(&chan->ch_qp);
611 
612 		/* decrement ha_qpn_cnt and check for close in progress */
613 		ibtl_close_hca_check(ibtl_hca);
614 	} else
615 		IBTF_DPRINTF_L2(ibtf_qp, "ibtl_cm_chan_is_closed: "
616 		    "ibc_release_qpn failed: status = %d\n", status);
617 	ibtl_qp_flow_control_exit();
618 }
619 
620 /*
621  * ibtl_cm_chan_is_reused()
622  *
623  *	Inform IBTL that the channel is going to be re-used.
624  *	chan	Channel Handle
625  */
626 void
627 ibtl_cm_chan_is_reused(ibt_channel_hdl_t chan)
628 {
629 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_reused(%p)", chan);
630 	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
631 	mutex_enter(&ibtl_free_qp_mutex);
632 	ASSERT(((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CONNECTED) !=
633 	    IBTL_RC_QP_CONNECTED));
634 
635 	/* The channel is no longer in the closed state; it will be re-used. */
636 	chan->ch_transport.rc.rc_free_flags = 0;
637 
638 	mutex_exit(&ibtl_free_qp_mutex);
640 }
641 
642 /*
643  * Function:	ibt_free_qp()
644  *
645  * Input:	ibt_qp		Handle for Channel(QP) that needs to be freed.
646  *
647  * Output:	NONE.
648  *
649  * Returns:	IBT_SUCCESS
650  *		IBT_QP_STATE_INVALID
651  *		IBT_QP_HDL_INVALID
652  *
653  * Description:
654  *		Free a previously allocated QP.
655  */
656 ibt_status_t
657 ibt_free_qp(ibt_qp_hdl_t ibt_qp)
658 {
659 	ibt_status_t		status;
660 	ibtl_hca_t		*ibtl_hca = ibt_qp->ch_qp.qp_hca;
661 
662 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p)", ibt_qp);
663 
664 	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
665 		ibtl_qp_flow_control_enter();
666 		mutex_enter(&ibtl_free_qp_mutex);
667 		if (ibt_qp->ch_transport.rc.rc_free_flags &
668 		    IBTL_RC_QP_CONNECTING) {
669 			IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
670 			    "Channel establishment is still in PROGRESS.");
671 			mutex_exit(&ibtl_free_qp_mutex);
672 			ibtl_qp_flow_control_exit();
673 			return (IBT_CHAN_STATE_INVALID);
674 		}
675 		if (ibt_qp->ch_transport.rc.rc_free_flags &
676 		    IBTL_RC_QP_CONNECTED) {
677 			if ((ibt_qp->ch_transport.rc.rc_free_flags &
678 			    IBTL_RC_QP_CLOSING) == 0) {
679 				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
680 				    "need to call ibt_close_rc_channel");
681 				mutex_exit(&ibtl_free_qp_mutex);
682 				ibtl_qp_flow_control_exit();
683 				return (IBT_CHAN_STATE_INVALID);
684 			}
685 			ibt_qp->ch_transport.rc.rc_free_flags |=
686 			    IBTL_RC_QP_FREED;
687 			status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
688 			    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
689 			    IBC_FREE_QP_ONLY,
690 			    &ibt_qp->ch_transport.rc.rc_qpn_hdl);
691 			mutex_exit(&ibtl_free_qp_mutex);
692 			ibtl_qp_flow_control_exit();
693 
694 			if (status == IBT_SUCCESS) {
695 				mutex_enter(&ibtl_clnt_list_mutex);
696 				ibtl_hca->ha_qpn_cnt++;
697 				mutex_exit(&ibtl_clnt_list_mutex);
698 				atomic_dec_32(&ibtl_hca->ha_qp_cnt);
699 				IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - "
700 				    "SUCCESS", ibt_qp);
701 			} else
702 				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
703 				    "ibc_free_qp failed: status = %d", status);
704 			return (status);
705 		}
706 		mutex_exit(&ibtl_free_qp_mutex);
707 	} else
708 		ibtl_qp_flow_control_enter();
709 
710 	status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
711 	    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
712 	    IBC_FREE_QP_AND_QPN, NULL);
713 	ibtl_qp_flow_control_exit();
714 
715 	if (status == IBT_SUCCESS) {
716 		/* effectively, this is kmem_free(ibt_qp); */
717 		ibtl_free_qp_async_check(&ibt_qp->ch_qp);
718 
719 		atomic_dec_32(&ibtl_hca->ha_qp_cnt);
720 		IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - SUCCESS", ibt_qp);
721 	} else {
722 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
723 		    "ibc_free_qp failed with error %d", status);
724 	}
725 
726 	return (status);
727 }
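
/*
 * Editorial example (not part of the original file): a sketch of the
 * tear-down order the checks above enforce for a connected RC channel --
 * ibt_close_rc_channel() must have run (so the channel is marked CLOSING
 * or CLOSED) before ibt_free_qp() will succeed.  The cmn_err() call is
 * hypothetical client logging.
 */
#if 0	/* illustrative sketch only */
static void
example_free_chan(ibt_channel_hdl_t chan)
{
	ibt_status_t	status;

	status = ibt_free_qp(chan);
	if (status != IBT_SUCCESS)
		cmn_err(CE_WARN, "example: ibt_free_qp failed: %d", status);
}
#endif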
728 
729 
730 /* helper function for ibt_query_qp */
731 static void
732 ibtl_fillin_sgid(ibt_cep_path_t *pathp, ibtl_hca_devinfo_t *hca_devp)
733 {
734 	uint8_t port;
735 	uint32_t sgid_ix;
736 	ib_gid_t *sgidp;
737 
738 	port = pathp->cep_hca_port_num;
739 	sgid_ix = pathp->cep_adds_vect.av_sgid_ix;
740 	if (port == 0 || port > hca_devp->hd_hca_attr->hca_nports ||
741 	    sgid_ix >= IBTL_HDIP2SGIDTBLSZ(hca_devp)) {
742 		pathp->cep_adds_vect.av_sgid.gid_prefix = 0;
743 		pathp->cep_adds_vect.av_sgid.gid_guid = 0;
744 	} else {
745 		mutex_enter(&ibtl_clnt_list_mutex);
746 		sgidp = hca_devp->hd_portinfop[port-1].p_sgid_tbl;
747 		pathp->cep_adds_vect.av_sgid = sgidp[sgid_ix];
748 		mutex_exit(&ibtl_clnt_list_mutex);
749 	}
750 }
751 
752 
753 /*
754  * Function:	ibt_query_qp
755  *
756  * Input:	ibt_qp 			- The IBT QP Handle.
757  *
758  * Output:	ibt_qp_query_attrp 	- Points to a ibt_qp_query_attr_t
759  *					  that on return contains all the
760  *					  attributes of the specified qp.
761  *
762  * Returns:	IBT_SUCCESS
763  *		IBT_QP_HDL_INVALID
764  *
765  * Description:
766  *		Query QP attributes
767  *
768  */
769 ibt_status_t
770 ibt_query_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_query_attr_t *qp_query_attrp)
771 {
772 	ibt_status_t		retval;
773 	ibtl_hca_devinfo_t	*hca_devp;
774 	ibt_qp_info_t		*qp_infop;
775 
776 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_qp(%p, %p)",
777 	    ibt_qp, qp_query_attrp);
778 
779 	ibtl_qp_flow_control_enter();
780 	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_query_qp(
781 	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), qp_query_attrp));
782 	ibtl_qp_flow_control_exit();
783 	if (retval == IBT_SUCCESS) {
784 		ibt_qp->ch_current_state = qp_query_attrp->qp_info.qp_state;
785 
786 		/* need to fill in sgid from port and sgid_ix for RC and UC */
787 		hca_devp = ibt_qp->ch_qp.qp_hca->ha_hca_devp;
788 		qp_infop = &qp_query_attrp->qp_info;
789 
790 		switch (qp_infop->qp_trans) {
791 		case IBT_RC_SRV:
792 			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_path,
793 			    hca_devp);
794 			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_alt_path,
795 			    hca_devp);
796 			break;
797 		case IBT_UC_SRV:
798 			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_path,
799 			    hca_devp);
800 			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_alt_path,
801 			    hca_devp);
802 			break;
803 		}
804 	} else {
805 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_qp: "
806 		    "failed on chan %p: %d", ibt_qp, retval);
807 	}
808 
809 	return (retval);
810 }
811 
812 
813 /*
814  * Function:
815  *	ibt_modify_qp
816  * Input:
817  *	ibt_qp		The IBT QP Handle.
818  *	flags		Specifies which attributes in ibt_qp_info_t are to
819  *			be modified.
820  *	modify_attrp	Points to an ibt_qp_info_t struct that contains all
821  *			the attributes of the specified QP that a client is
822  *			allowed to modify after a QP has been allocated.
823  * Output:
824  *	actual_sz	Returned actual queue sizes.
825  * Returns:
826  *	IBT_SUCCESS
827  * Description:
828  *	Modify the attributes of an existing QP.
829  */
830 ibt_status_t
831 ibt_modify_qp(ibt_qp_hdl_t ibt_qp, ibt_cep_modify_flags_t flags,
832     ibt_qp_info_t *modify_attrp, ibt_queue_sizes_t *actual_sz)
833 {
834 	ibt_status_t		retval;
835 
836 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_qp(%p, %d, %p, %p)",
837 	    ibt_qp, flags, modify_attrp, actual_sz);
838 
839 	ibtl_qp_flow_control_enter();
840 	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_modify_qp)(
841 	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), flags,
842 	    modify_attrp, actual_sz);
843 	ibtl_qp_flow_control_exit();
844 	if (retval == IBT_SUCCESS) {
845 		ibt_qp->ch_current_state = modify_attrp->qp_state;
846 		if (ibt_qp->ch_qp.qp_type == IBT_UD_SRV) {
847 			if (flags & (IBT_CEP_SET_PORT | IBT_CEP_SET_RESET_INIT))
848 				ibt_qp->ch_transport.ud.ud_port_num =
849 				    modify_attrp->qp_transport.ud.ud_port;
850 			if (flags & (IBT_CEP_SET_QKEY | IBT_CEP_SET_RESET_INIT))
851 				ibt_qp->ch_transport.ud.ud_qkey =
852 				    modify_attrp->qp_transport.ud.ud_qkey;
853 		}
854 	} else {
855 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_qp: failed on chan %p: %d",
856 		    ibt_qp, retval);
857 
858 		if (retval == IBT_CHAN_STATE_INVALID) {
859 			/* Our cached QP state value was invalid. */
860 			ibt_qp_query_attr_t	qp_attr;
861 
862 			/* Query the channel (QP) */
863 			if (ibt_query_qp(ibt_qp, &qp_attr) == IBT_SUCCESS)
864 				ibt_qp->ch_current_state =
865 				    qp_attr.qp_info.qp_state;
866 		}
867 	}
868 	return (retval);
869 }
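
/*
 * Editorial example (not part of the original file): changing the Q_Key of
 * a UD channel with ibt_modify_qp().  On success the wrapper above also
 * refreshes its cached ud_qkey because IBT_CEP_SET_QKEY is set.  The
 * channel is assumed to currently be in RTS; qp_state is set to the same
 * value so the cached channel state stays accurate.
 */
#if 0	/* illustrative sketch only */
static ibt_status_t
example_set_qkey(ibt_channel_hdl_t ud_chan, ib_qkey_t new_qkey)
{
	ibt_qp_info_t	qp_info;

	bzero(&qp_info, sizeof (qp_info));
	qp_info.qp_trans = IBT_UD_SRV;
	qp_info.qp_current_state = IBT_STATE_RTS;	/* assumed state */
	qp_info.qp_state = IBT_STATE_RTS;		/* no state change */
	qp_info.qp_transport.ud.ud_qkey = new_qkey;

	return (ibt_modify_qp(ud_chan, IBT_CEP_SET_QKEY, &qp_info, NULL));
}
#endif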
870 
871 
872 /*
873  * Function:
874  *	ibt_migrate_path
875  * Input:
876  *	rc_chan		A previously allocated RC channel handle.
877  * Output:
878  *	none.
879  * Returns:
880  *	IBT_SUCCESS on Success else appropriate error.
881  * Description:
882  *	Force the CI to use the alternate path. The alternate path becomes
883  *	the primary path. A new alternate path should be loaded and enabled.
884  *	Assumes that the given channel is in the RTS or SQD state.
885  */
886 ibt_status_t
887 ibt_migrate_path(ibt_channel_hdl_t rc_chan)
888 {
889 	ibt_status_t		retval;
890 	ibt_qp_info_t		qp_info;
891 	ibt_qp_query_attr_t	qp_attr;
892 	ibt_cep_modify_flags_t	cep_flags;
893 	int			retries = 1;
894 
895 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_migrate_path: channel %p", rc_chan);
896 
897 	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
898 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path: "
899 		    "Invalid Channel type: Applicable only to RC Channel");
900 		return (IBT_CHAN_SRV_TYPE_INVALID);
901 	}
902 
903 	if (rc_chan->ch_current_state != IBT_STATE_RTS &&
904 	    rc_chan->ch_current_state != IBT_STATE_SQD) {
905 		if (ibt_query_qp(rc_chan, &qp_attr) == IBT_SUCCESS) {
906 			/* ch_current_state is fixed by ibt_query_qp */
907 			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
908 			    rc_chan->ch_current_state != IBT_STATE_SQD)
909 				return (IBT_CHAN_STATE_INVALID);
910 			retries = 0;
911 		} else /* query_qp should never really fail */
912 			return (IBT_CHAN_STATE_INVALID);
913 	}
914 
915 retry:
916 	/* Call modify_qp */
917 	cep_flags = IBT_CEP_SET_MIG | IBT_CEP_SET_STATE;
918 	qp_info.qp_state = rc_chan->ch_current_state;
919 	qp_info.qp_current_state = rc_chan->ch_current_state;
920 	qp_info.qp_trans = IBT_RC_SRV;
921 	qp_info.qp_transport.rc.rc_mig_state = IBT_STATE_MIGRATED;
922 	retval = ibt_modify_qp(rc_chan, cep_flags, &qp_info, NULL);
923 
924 	if (retval != IBT_SUCCESS) {
925 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
926 		    " ibt_modify_qp() returned = %d", retval);
927 		if (rc_chan->ch_current_state != qp_info.qp_state &&
928 		    --retries >= 0) {
929 			/*
930 			 * That means our cached 'state' was invalid.
931 			 * We know ibt_modify_qp() fixed it up, so it
932 			 * might be worth retrying.
933 			 */
934 			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
935 			    rc_chan->ch_current_state != IBT_STATE_SQD)
936 				return (IBT_CHAN_STATE_INVALID);
937 			IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
938 			    " retrying after 'state' fixed");
939 			goto retry;
940 		}
941 	}
942 	return (retval);
943 }
944 
945 
946 /*
947  * Function:
948  *	ibt_set_qp_private
949  * Input:
950  *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
951  *	clnt_private	The client private data.
952  * Output:
953  *	none.
954  * Returns:
955  *	none.
956  * Description:
957  *	Set the client private data.
958  */
959 void
960 ibt_set_qp_private(ibt_qp_hdl_t ibt_qp, void *clnt_private)
961 {
962 	ibt_qp->ch_clnt_private = clnt_private;
963 }
964 
965 
966 /*
967  * Function:
968  *	ibt_get_qp_private
969  * Input:
970  *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
971  * Output:
972  *	none.
973  * Returns:
974  *	The client private data.
975  * Description:
976  *	Get the client private data.
977  */
978 void *
979 ibt_get_qp_private(ibt_qp_hdl_t ibt_qp)
980 {
981 	return (ibt_qp->ch_clnt_private);
982 }
983 
984 
985 /*
986  * Function:
987  *	ibt_qp_to_hca_guid
988  * Input:
989  *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
990  * Output:
991  *	none.
992  * Returns:
993  *	hca_guid	Returned HCA GUID on which the specified QP is
994  *			allocated. Valid if it is non-NULL on return.
995  * Description:
996  *	A helper function to retrieve HCA GUID for the specified QP.
997  */
998 ib_guid_t
999 ibt_qp_to_hca_guid(ibt_qp_hdl_t ibt_qp)
1000 {
1001 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_qp_to_hca_guid(%p)", ibt_qp);
1002 
1003 	return (IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(ibt_qp)));
1004 }
1005 
1006 
1007 /*
1008  * Function:
1009  *	ibt_recover_ud_qp
1010  * Input:
1011  *	ibt_qp		A QP Handle which is in the SQ Error state.
1012  * Output:
1013  *	none.
1014  * Returns:
1015  *	IBT_SUCCESS
1016  *	IBT_QP_SRV_TYPE_INVALID
1017  *	IBT_QP_STATE_INVALID.
1018  * Description:
1019  *	Recover a UD QP which has transitioned to the SQ Error state.
1020  *	ibt_recover_ud_qp() transitions the QP from the SQ Error state to
1021  *	the Ready-To-Send QP state.
1022  *
1023  *	If a work request posted to a UD QP's send queue completes with an
1024  *	error (see ibt_wc_status_t), the QP gets transitioned to SQ Error state.
1025  *	In order to reuse this QP, ibt_recover_ud_qp() can be used to recover
1026  *	the QP to a usable (Ready-to-Send) state.
1027  */
1028 ibt_status_t
1029 ibt_recover_ud_qp(ibt_qp_hdl_t ibt_qp)
1030 {
1031 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recover_ud_qp(%p)", ibt_qp);
1032 
1033 	return (ibt_recover_ud_channel(IBTL_QP2CHAN(ibt_qp)));
1034 }
1035 
1036 
1037 /*
1038  * Function:
1039  *	ibt_recycle_ud
1040  * Input:
1041  *	ud_chan		The IBT UD QP Handle.
1042  *	hca_port_num, pkey_ix, qkey	New port, P_Key index and Q_Key.
1043  *
1044  * Output:
1045  *	none
1046  * Returns:
1047  *	IBT_SUCCESS
1048  *	IBT_CHAN_SRV_TYPE_INVALID
1049  *	IBT_CHAN_STATE_INVALID
1050  *
1051  * Description:
1052  *	Revert the UD QP back to a usable state.
1053  */
1054 ibt_status_t
1055 ibt_recycle_ud(ibt_channel_hdl_t ud_chan, uint8_t hca_port_num,
1056     uint16_t pkey_ix, ib_qkey_t qkey)
1057 {
1058 	ibt_qp_query_attr_t	qp_attr;
1059 	ibt_status_t		retval;
1060 
1061 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recycle_ud(%p, %d, %x, %x): ",
1062 	    ud_chan, hca_port_num, pkey_ix, qkey);
1063 
1064 	if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
1065 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
1066 		    "chan %p is not a UD channel", ud_chan);
1067 		return (IBT_CHAN_SRV_TYPE_INVALID);
1068 	}
1069 
1070 	retval = ibt_query_qp(ud_chan, &qp_attr);
1071 	if (retval != IBT_SUCCESS) {
1072 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
1073 		    "ibt_query_qp failed on chan %p: %d", ud_chan, retval);
1074 		return (retval);
1075 	}
1076 	if (qp_attr.qp_info.qp_state != IBT_STATE_ERROR) {
1077 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
1078 		    "chan %p is in state %d (not in ERROR state)",
1079 		    ud_chan, qp_attr.qp_info.qp_state);
1080 		ud_chan->ch_current_state = qp_attr.qp_info.qp_state;
1081 		return (IBT_CHAN_STATE_INVALID);
1082 	}
1083 
1084 	/* transition the QP from ERROR to RESET */
1085 	qp_attr.qp_info.qp_state = IBT_STATE_RESET;
1086 	qp_attr.qp_info.qp_trans = ud_chan->ch_qp.qp_type;
1087 	retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_STATE, &qp_attr.qp_info,
1088 	    NULL);
1089 	if (retval != IBT_SUCCESS) {
1090 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
1091 		    "ibt_modify_qp(ERROR=>RESET) failed on chan %p: %d",
1092 		    ud_chan, retval);
1093 		return (retval);
1094 	}
1095 	ud_chan->ch_current_state = IBT_STATE_RESET;
1096 
1097 	/* transition the QP back to RTS */
1098 	qp_attr.qp_info.qp_transport.ud.ud_port = hca_port_num;
1099 	qp_attr.qp_info.qp_transport.ud.ud_qkey = qkey;
1100 	qp_attr.qp_info.qp_transport.ud.ud_pkey_ix = pkey_ix;
1101 	retval = ibt_initialize_qp(ud_chan, &qp_attr.qp_info);
1102 	if (retval != IBT_SUCCESS) {
1103 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
1104 		    "ibt_initialize_qp failed on chan %p: %d", ud_chan, retval);
1105 		/* the man page says the QP should be left in ERROR state */
1106 		(void) ibt_flush_qp(ud_chan);
1107 	}
1108 	return (retval);
1109 }
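
/*
 * Editorial example (not part of the original file): recovering a UD
 * channel that has dropped into the ERROR state after a failed send.
 * The port (1), P_Key index (0) and Q_Key values are placeholders.
 */
#if 0	/* illustrative sketch only */
static void
example_recycle(ibt_channel_hdl_t ud_chan)
{
	ibt_status_t	status;

	status = ibt_recycle_ud(ud_chan, 1, 0, 0x12345678);
	if (status != IBT_SUCCESS) {
		/* The channel is left in ERROR; free it or retry later. */
		(void) ibt_free_qp(ud_chan);
	}
}
#endif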
1110 
1111 /*
1112  * Function:
1113  *	ibt_pause_sendq
1114  * Input:
1115  *	chan		The IBT QP Handle.
1116  *	modify_flags	IBT_CEP_SET_NOTHING or IBT_CEP_SET_SQD_EVENT
1117  *
1118  * Output:
1119  *	none.
1120  * Returns:
1121  *	IBT_SUCCESS
1122  *	IBT_CHAN_HDL_INVALID
1123  *	IBT_CHAN_STATE_INVALID
1124  *	IBT_INVALID_PARAM
1125  *
1126  * Description:
1127  *	Place the send queue of the specified channel into the send queue
1128  *	drained (SQD) state.
1129  *
1130  */
1131 ibt_status_t
1132 ibt_pause_sendq(ibt_channel_hdl_t chan, ibt_cep_modify_flags_t modify_flags)
1133 {
1134 	ibt_qp_info_t		modify_attr;
1135 	ibt_status_t		retval;
1136 
1137 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_pause_sendq(%p, %x)", chan, modify_flags);
1138 
1139 	modify_flags &= IBT_CEP_SET_SQD_EVENT;	/* ignore other bits */
1140 	modify_flags |= IBT_CEP_SET_STATE;
1141 
1142 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1143 	/*
1144 	 * Set the QP state to SQD.
1145 	 */
1146 	modify_attr.qp_state = IBT_STATE_SQD;
1147 	modify_attr.qp_trans = chan->ch_qp.qp_type;
1148 
1149 	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
1150 
1151 	if (retval != IBT_SUCCESS) {
1152 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_pause_sendq: "
1153 		    "failed on chan %p: %d", chan, retval);
1154 	}
1155 	return (retval);
1156 }
1157 
1158 
1159 /*
1160  * Function:
1161  *	ibt_unpause_sendq
1162  * Input:
1163  *	chan	The IBT Channel Handle.
1164  * Output:
1165  *	none.
1166  * Returns:
1167  *	IBT_SUCCESS
1168  *	IBT_CHAN_HDL_INVALID
1169  *	IBT_CHAN_STATE_INVALID
1170  * Description:
1171  *	Un-pauses the previously paused channel. This call will transition the
1172  *	QP from SQD to RTS state.
1173  */
1174 ibt_status_t
1175 ibt_unpause_sendq(ibt_channel_hdl_t chan)
1176 {
1177 	ibt_qp_info_t		modify_attr;
1178 	ibt_status_t		retval;
1179 
1180 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_unpause_sendq(%p)", chan);
1181 
1182 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1183 
1184 	/*
1185 	 * Set the QP state to RTS.
1186 	 */
1187 	modify_attr.qp_current_state = IBT_STATE_SQD;
1188 	modify_attr.qp_state = IBT_STATE_RTS;
1189 	modify_attr.qp_trans = chan->ch_qp.qp_type;
1190 
1191 	retval = ibt_modify_qp(chan, IBT_CEP_SET_STATE, &modify_attr, NULL);
1192 	if (retval != IBT_SUCCESS) {
1193 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_unpause_sendq: "
1194 		    "failed on chan %p: %d", chan, retval);
1195 	}
1196 	return (retval);
1197 }
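
/*
 * Editorial example (not part of the original file): pairing
 * ibt_pause_sendq() and ibt_unpause_sendq() around an operation that
 * requires the channel to be in SQD, here ibt_set_rdma_resource()
 * (defined later in this file).  The resource counts (4, 4) are
 * placeholders.
 */
#if 0	/* illustrative sketch only */
static ibt_status_t
example_retune_rdma_resources(ibt_channel_hdl_t rc_chan)
{
	ibt_status_t	status;

	status = ibt_pause_sendq(rc_chan, IBT_CEP_SET_SQD_EVENT);
	if (status != IBT_SUCCESS)
		return (status);

	status = ibt_set_rdma_resource(rc_chan,
	    IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN, 4, 4);
	if (status != IBT_SUCCESS)
		return (status);

	return (ibt_unpause_sendq(rc_chan));
}
#endif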
1198 
1199 
1200 /*
1201  * Function:
1202  *	ibt_resize_queues
1203  * Input:
1204  *	chan		A previously allocated channel handle.
1205  *	flags		QP Flags
1206  *				IBT_SEND_Q
1207  *				IBT_RECV_Q
1208  *	request_sz	Requested new sizes.
1209  * Output:
1210  *	actual_sz	Returned actual sizes.
1211  * Returns:
1212  *	IBT_SUCCESS
1213  * Description:
1214  *	Resize the SendQ/RecvQ sizes of a channel. Can only be called on
1215  *	a previously opened channel.
1216  */
1217 ibt_status_t
1218 ibt_resize_queues(ibt_channel_hdl_t chan, ibt_qflags_t flags,
1219     ibt_queue_sizes_t *request_sz, ibt_queue_sizes_t *actual_sz)
1220 {
1221 	ibt_cep_modify_flags_t	modify_flags = IBT_CEP_SET_STATE;
1222 	ibt_qp_info_t		modify_attr;
1223 	ibt_status_t		retval;
1224 
1225 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_resize_queues(%p, 0x%X, %p, %p)",
1226 	    chan, flags, request_sz, actual_sz);
1227 
1228 	if ((flags & (IBT_SEND_Q | IBT_RECV_Q)) == 0)  {
1229 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
1230 		    "Flags <0x%X> not set", flags);
1231 		return (IBT_INVALID_PARAM);
1232 	}
1233 
1234 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1235 
1236 	modify_attr.qp_current_state = chan->ch_current_state;
1237 	modify_attr.qp_trans = chan->ch_qp.qp_type;
1238 	modify_attr.qp_state = chan->ch_current_state;
1239 
1240 	if (flags & IBT_SEND_Q) {
1241 		modify_attr.qp_sq_sz = request_sz->qs_sq;
1242 		modify_flags |= IBT_CEP_SET_SQ_SIZE;
1243 	}
1244 
1245 	if (flags & IBT_RECV_Q) {
1246 		modify_attr.qp_rq_sz = request_sz->qs_rq;
1247 		modify_flags |= IBT_CEP_SET_RQ_SIZE;
1248 	}
1249 
1250 	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, actual_sz);
1251 	if (retval != IBT_SUCCESS) {
1252 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
1253 		    "failed on QP %p: %d", chan, retval);
1254 	}
1255 
1256 	return (retval);
1257 }
1258 
1259 
1260 /*
1261  * Function:
1262  *	ibt_query_queues
1263  * Input:
1264  *	chan		A previously allocated channel handle.
1265  * Output:
1266  *	actual_sz	Returned actual sizes.
1267  * Returns:
1268  *	IBT_SUCCESS
1269  * Description:
1270  *	Query the SendQ/RecvQ sizes of a channel.
1271  */
1272 ibt_status_t
1273 ibt_query_queues(ibt_channel_hdl_t chan, ibt_queue_sizes_t *actual_sz)
1274 {
1275 	ibt_status_t		retval;
1276 	ibt_qp_query_attr_t	qp_query_attr;
1277 
1278 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_queues(%p)", chan);
1279 
1280 	/* Perform Query QP and retrieve QP sizes. */
1281 	retval = ibt_query_qp(chan, &qp_query_attr);
1282 	if (retval != IBT_SUCCESS) {
1283 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_queues: "
1284 		    "ibt_query_qp failed: qp %p: %d", chan, retval);
1285 		return (retval);
1286 	}
1287 
1288 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
1289 	    actual_sz->qs_sq))
1290 	actual_sz->qs_sq = qp_query_attr.qp_info.qp_sq_sz;
1291 	actual_sz->qs_rq = qp_query_attr.qp_info.qp_rq_sz;
1292 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
1293 	    actual_sz->qs_sq))
1294 	chan->ch_current_state = qp_query_attr.qp_info.qp_state;
1295 
1296 	return (retval);
1297 }
1298 
1299 
1300 /*
1301  * Function:
1302  *	ibt_modify_rdma
1303  * Input:
1304  *	rc_chan		A previously allocated channel handle.
1305  *
1306  *	modify_flags	Bitwise "or" of any of the following:
1307  *			IBT_CEP_SET_RDMA_R	Enable/Disable RDMA RD
1308  *			IBT_CEP_SET_RDMA_W	Enable/Disable RDMA WR
1309  *			IBT_CEP_SET_ATOMIC	Enable/Disable Atomics
1310  *
1311  *	flags		Channel End Point (CEP) Disable Flags (0 => enable).
1312  *			IBT_CEP_NO_RDMA_RD	Disable incoming RDMA RD's
1313  *			IBT_CEP_NO_RDMA_WR	Disable incoming RDMA WR's
1314  *			IBT_CEP_NO_ATOMIC	Disable incoming Atomics.
1315  * Output:
1316  *	none.
1317  * Returns:
1318  *	IBT_SUCCESS
1319  *	IBT_QP_SRV_TYPE_INVALID
1320  *	IBT_CHAN_HDL_INVALID
1321  *	IBT_CHAN_ATOMICS_NOT_SUPPORTED
1322  *	IBT_CHAN_STATE_INVALID
1323  * Description:
1324  *	Enable/disable RDMA operations. To enable an operation clear the
1325  *	"disable" flag. Can call this function when the channel is in
1326  *	INIT, RTS or SQD states. If called in any other state
1327  *	IBT_CHAN_STATE_INVALID is returned. When the operation completes the
1328  *	channel state is left unchanged.
1329  */
1330 ibt_status_t
1331 ibt_modify_rdma(ibt_channel_hdl_t rc_chan,
1332     ibt_cep_modify_flags_t modify_flags, ibt_cep_flags_t flags)
1333 {
1334 	ibt_status_t		retval;
1335 	ibt_qp_info_t		modify_attr;
1336 
1337 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_rdma(%p, 0x%x, 0x%x)",
1338 	    rc_chan, modify_flags, flags);
1339 
1340 	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
1341 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
1342 		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
1343 		    rc_chan->ch_qp.qp_type);
1344 		return (IBT_QP_SRV_TYPE_INVALID);
1345 	}
1346 
1347 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1348 
1349 	/*
1350 	 * Can only call this function when the channel in INIT, RTS or SQD
1351 	 * states.
1352 	 */
1353 	if ((rc_chan->ch_current_state != IBT_STATE_INIT) &&
1354 	    (rc_chan->ch_current_state != IBT_STATE_RTS) &&
1355 	    (rc_chan->ch_current_state != IBT_STATE_SQD)) {
1356 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: Invalid Channel "
1357 		    "state: 0x%X", rc_chan->ch_current_state);
1358 		return (IBT_CHAN_STATE_INVALID);
1359 	}
1360 
1361 	modify_attr.qp_state = modify_attr.qp_current_state =
1362 	    rc_chan->ch_current_state;
1363 	modify_attr.qp_trans = rc_chan->ch_qp.qp_type;
1364 	modify_attr.qp_flags = flags;
1365 
1366 	modify_flags &= (IBT_CEP_SET_RDMA_R | IBT_CEP_SET_RDMA_W |
1367 	    IBT_CEP_SET_ATOMIC);
1368 	modify_flags |= IBT_CEP_SET_STATE;
1369 
1370 	retval = ibt_modify_qp(rc_chan, modify_flags, &modify_attr, NULL);
1371 	if (retval != IBT_SUCCESS) {
1372 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
1373 		    "failed on chan %p: %d", rc_chan, retval);
1374 	}
1375 	return (retval);
1376 }
1377 
1378 
1379 /*
1380  * Function:
1381  *	ibt_set_rdma_resource
1382  * Input:
1383  *	chan		A previously allocated RC channel handle.
1384  *	modify_flags	Bitwise "or" of any of the following:
1385  *			IBT_CEP_SET_RDMARA_OUT	Initiator depth (rdma_ra_out)
1386  *			IBT_CEP_SET_RDMARA_IN	Responder Resources
1387  *						(rdma_ra_in)
1388  *	rdma_ra_out	Outgoing RDMA Reads/Atomics
1389  *	resp_rdma_ra_out Incoming RDMA Reads/Atomics (Responder Resources).
1390  * Output:
1391  *	none.
1392  * Returns:
1393  *	IBT_SUCCESS
1394  * Description:
1395  *	Change the number of resources to be used for incoming and outgoing
1396  *	RDMA reads & Atomics. Can only be called on a previously opened
1397  *	RC channel.  Can only be called on a paused channel, and this will
1398  *	un-pause that channel.
1399  */
1400 ibt_status_t
1401 ibt_set_rdma_resource(ibt_channel_hdl_t chan,
1402     ibt_cep_modify_flags_t modify_flags, uint8_t rdma_ra_out,
1403     uint8_t resp_rdma_ra_out)
1404 {
1405 	ibt_qp_info_t		modify_attr;
1406 	ibt_status_t		retval;
1407 
1408 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_set_rdma_resource(%p, 0x%x, %d, %d)",
1409 	    chan, modify_flags, rdma_ra_out, resp_rdma_ra_out);
1410 
1411 	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
1412 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
1413 		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
1414 		    chan->ch_qp.qp_type);
1415 		return (IBT_CHAN_SRV_TYPE_INVALID);
1416 	}
1417 
1418 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1419 
1420 	modify_attr.qp_trans = chan->ch_qp.qp_type;
1421 	modify_attr.qp_state = IBT_STATE_SQD;
1422 
1423 	modify_attr.qp_transport.rc.rc_rdma_ra_out = rdma_ra_out;
1424 	modify_attr.qp_transport.rc.rc_rdma_ra_in = resp_rdma_ra_out;
1425 	modify_flags &= (IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN);
1426 	modify_flags |= IBT_CEP_SET_STATE;
1427 
1428 	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
1429 	if (retval != IBT_SUCCESS) {
1430 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
1431 		    "failed on chan %p: %d", chan, retval);
1432 	}
1433 	return (retval);
1434 }
1435 
1436 
1437 /*
1438  * Function:
1439  *	ibt_change_port
1440  * Input:
1441  *	chan		A previously allocated RC channel handle.
1442  *	port_num	New HCA port.
1443  * Output:
1444  *	none.
1445  * Returns:
1446  *	IBT_SUCCESS
1447  * Description:
1448  *	Change the primary physical port of a channel. (This is done only if
1449  *	the HCA supports this capability.)
1450  */
1451 ibt_status_t
1452 ibt_change_port(ibt_channel_hdl_t chan, uint8_t port_num)
1453 {
1454 	ibt_cep_modify_flags_t	modify_flags;
1455 	ibt_qp_info_t		modify_attr;
1456 	ibt_status_t		retval;
1457 
1458 	IBTF_DPRINTF_L3(ibtf_qp, "ibt_change_port(%p, %d)", chan, port_num);
1459 
1460 	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
1461 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
1462 		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
1463 		    chan->ch_qp.qp_type);
1464 		return (IBT_CHAN_SRV_TYPE_INVALID);
1465 	}
1466 	bzero(&modify_attr, sizeof (ibt_qp_info_t));
1467 
1468 	modify_attr.qp_state = IBT_STATE_SQD;
1469 	modify_attr.qp_trans = chan->ch_qp.qp_type;
1470 	modify_attr.qp_transport.rc.rc_path.cep_hca_port_num = port_num;
1471 
1472 	modify_flags = IBT_CEP_SET_STATE | IBT_CEP_SET_PORT;
1473 
1474 	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
1475 	if (retval != IBT_SUCCESS) {
1476 		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
1477 		    "failed on chan %p: %d", chan, retval);
1478 	}
1479 	return (retval);
1480 }
1481 
1482 
1483 void
1484 ibtl_init_cep_states(void)
1485 {
1486 	int	index;
1487 	int	ibt_nstate_inits;
1488 
1489 	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_init_cep_states()");
1490 
1491 	ibt_nstate_inits = sizeof (ibt_cep_next_state_inits) /
1492 	    sizeof (ibt_cep_next_state_inits[0]);
1493 
1494 	/*
1495 	 * Initialize CEP next state table, using an indirect lookup table so
1496 	 * that this code isn't dependent on the ibt_cep_state_t enum values.
1497 	 */
1498 	for (index = 0; index < ibt_nstate_inits; index++) {
1499 		ibt_cep_state_t	state;
1500 
1501 		state = ibt_cep_next_state_inits[index].current_state;
1502 
1503 		ibt_cep_next_state[state].next_state =
1504 		    ibt_cep_next_state_inits[index].next_state;
1505 
1506 		ibt_cep_next_state[state].modify_flags =
1507 		    ibt_cep_next_state_inits[index].modify_flags;
1508 	}
1509 }
1510