/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/ib/ibtl/impl/ibtl.h>
#include <sys/ib/ibtl/impl/ibtl_cm.h>

/*
 * ibtl_qp.c
 *	These routines implement (most of) the verbs related to
 *	Queue Pairs.
 */

/* Globals. */
static char ibtf_qp[] = "ibtl";

/* This table indirectly initializes the ibt_cep_next_state[] table. */
typedef struct ibt_cep_next_state_s {
	ibt_cep_state_t		next_state;
	ibt_cep_modify_flags_t	modify_flags;
} ibt_cep_next_state_t;

struct	{
	ibt_cep_state_t		current_state;
	ibt_cep_state_t		next_state;
	ibt_cep_modify_flags_t	modify_flags;
} ibt_cep_next_state_inits[] = {
	{ IBT_STATE_RESET, IBT_STATE_INIT, IBT_CEP_SET_RESET_INIT},
	{ IBT_STATE_INIT, IBT_STATE_RTR, IBT_CEP_SET_INIT_RTR},
	{ IBT_STATE_RTR, IBT_STATE_RTS, IBT_CEP_SET_RTR_RTS}
};

ibt_cep_next_state_t ibt_cep_next_state[IBT_STATE_NUM];

_NOTE(SCHEME_PROTECTS_DATA("unique", ibt_cep_next_state))

/*
 * The following data and functions limit the number of simultaneous
 * QP verb calls passed down to the CI, which can increase system
 * stability.
 */

int ibtl_qp_calls_curr;
int ibtl_qp_calls_max = 128;	/* limit on # of simultaneous QP verb calls */
kmutex_t ibtl_qp_mutex;
kcondvar_t ibtl_qp_cv;

void
ibtl_qp_flow_control_enter(void)
{
	mutex_enter(&ibtl_qp_mutex);
	while (ibtl_qp_calls_curr >= ibtl_qp_calls_max) {
		cv_wait(&ibtl_qp_cv, &ibtl_qp_mutex);
	}
	++ibtl_qp_calls_curr;
	mutex_exit(&ibtl_qp_mutex);
}

void
ibtl_qp_flow_control_exit(void)
{
	mutex_enter(&ibtl_qp_mutex);
	cv_signal(&ibtl_qp_cv);
	--ibtl_qp_calls_curr;
	mutex_exit(&ibtl_qp_mutex);
}

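/*
 * Usage sketch (illustrative only): every verb in this file brackets its
 * call down to the CI with the flow control pair above, e.g.
 *
 *	ibtl_qp_flow_control_enter();
 *	retval = (IBTL_CHAN2CIHCAOPS_P(chan)->ibc_xxx)( ... );
 *	ibtl_qp_flow_control_exit();
 *
 * so that at most ibtl_qp_calls_max QP verb calls are in the CI at once
 * (ibc_xxx stands for whichever CI entry point the verb uses).
 */
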
/*
 * Function:
 *	ibt_alloc_qp
 * Input:
 *	hca_hdl		HCA Handle.
 *	type		Specifies the type of QP to alloc in ibt_alloc_qp()
 *	qp_attrp	Specifies the ibt_qp_alloc_attr_t that are needed to
 *			allocate a QP and transition it to the RTS state for
 *			UDs and INIT state for all other QPs.
 * Output:
 *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *			WR SGL elements.
 *	qpn_p		Returned QP Number of the allocated QP.
 *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Allocate a QP with specified attributes.
 */
ibt_status_t
ibt_alloc_qp(ibt_hca_hdl_t hca_hdl, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ib_qpn_t *qpn_p, ibt_qp_hdl_t *ibt_qp_p)
{
	ibt_status_t		retval;
	ibtl_channel_t		*chanp;
	ibt_tran_srv_t		qp_type;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_qp(%p, %d, %p, %p, %p, %p) ",
	    hca_hdl, type, qp_attrp, queue_sizes_p, qpn_p, ibt_qp_p);

	switch (type) {
	case IBT_UD_RQP:
		qp_type = IBT_UD_SRV;
		break;
	case IBT_RC_RQP:
		qp_type = IBT_RC_SRV;
		break;
	case IBT_UC_RQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Unreliable Connected "
		    "Transport Type is not supported.");
		*ibt_qp_p = NULL;
		return (IBT_NOT_SUPPORTED);
	case IBT_RD_RQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Reliable Datagram "
		    "Transport Type is not supported.");
		*ibt_qp_p = NULL;
		return (IBT_NOT_SUPPORTED);
	default:
		/* shouldn't happen ILLEGAL Type */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Illegal Transport Type "
		    "%d", type);
		*ibt_qp_p = NULL;
		return (IBT_QP_SRV_TYPE_INVALID);
	}

	/* Get CI CQ handles */
	if ((qp_attrp->qp_scq_hdl == NULL) || (qp_attrp->qp_rcq_hdl == NULL)) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: Invalid CQ Handle");
		*ibt_qp_p = NULL;
		return (IBT_CQ_HDL_INVALID);
	}
	qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
	qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;

	if ((qp_attrp->qp_alloc_flags & IBT_QP_USES_SRQ) &&
	    (qp_attrp->qp_srq_hdl != NULL))
		qp_attrp->qp_ibc_srq_hdl =
		    qp_attrp->qp_srq_hdl->srq_ibc_srq_hdl;
	else
		qp_attrp->qp_ibc_srq_hdl = NULL;

	/* Allocate Channel structure */
	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_qp)(
	    IBTL_HCA2CIHCA(hca_hdl), &chanp->ch_qp, type, qp_attrp,
	    queue_sizes_p, qpn_p, &chanp->ch_qp.qp_ibc_qp_hdl);
	ibtl_qp_flow_control_exit();
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_qp: "
		    "Failed to allocate QP: %d", retval);
		kmem_free(chanp, sizeof (*chanp));
		*ibt_qp_p = NULL;
		return (retval);
	}

	/* Initialize the internal QP struct. */
	chanp->ch_qp.qp_type = qp_type;
	chanp->ch_qp.qp_hca = hca_hdl;
	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
	chanp->ch_current_state = IBT_STATE_RESET;
	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

	mutex_enter(&hca_hdl->ha_mutex);
	hca_hdl->ha_qp_cnt++;
	mutex_exit(&hca_hdl->ha_mutex);

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_qp: SUCCESS: qp %p owned by '%s'",
	    chanp, hca_hdl->ha_clnt_devp->clnt_name);

	*ibt_qp_p = chanp;

	return (retval);
}

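/*
 * Usage sketch (illustrative only, not part of the implementation):
 * a client that already holds send and receive CQ handles (assumed
 * names "scq" and "rcq") could allocate an RC QP roughly like this:
 *
 *	ibt_qp_alloc_attr_t	attrs;
 *	ibt_chan_sizes_t	sizes;
 *	ib_qpn_t		qpn;
 *	ibt_qp_hdl_t		qp;
 *
 *	bzero(&attrs, sizeof (attrs));
 *	attrs.qp_scq_hdl = scq;
 *	attrs.qp_rcq_hdl = rcq;
 *	status = ibt_alloc_qp(hca_hdl, IBT_RC_RQP, &attrs, &sizes,
 *	    &qpn, &qp);
 *
 * On success the QP is in the RESET state; on failure qp is set to NULL.
 * The channel is transitioned out of RESET with ibt_initialize_qp().
 */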

/*
 * Function:
 *	ibt_initialize_qp
 * Input:
 *	ibt_qp		The previously allocated IBT QP Handle.
 *	modify_attrp	Specifies the QP Modify attributes needed to transition
 *			the QP to the RTS state for UDs (including special QPs)
 *			and INIT state for all other QPs.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Transition the QP to the RTS state for UDs (including special QPs)
 *	and INIT state for all other QPs.
 */
ibt_status_t
ibt_initialize_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_info_t *modify_attrp)
{
	ibt_status_t		status;
	ibt_cep_state_t		state;
	ibc_hca_hdl_t		ibc_hca_hdl = IBTL_CHAN2CIHCA(ibt_qp);
	ibc_qp_hdl_t		ibc_qp_hdl = IBTL_CHAN2CIQP(ibt_qp);
	ibc_operations_t	*hca_ops_p = IBTL_CHAN2CIHCAOPS_P(ibt_qp);
	ibt_cep_modify_flags_t	modify_flags;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp(%p, %p)",
	    ibt_qp, modify_attrp);

	/*
	 * Validate the QP Type from the channel with QP Type from the
	 * modify attribute struct.
	 */
	if (ibt_qp->ch_qp.qp_type != modify_attrp->qp_trans) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
		    "QP Type mismatch: Chan QP Type<%d>, Modify QP Type<%d>",
		    ibt_qp->ch_qp.qp_type, modify_attrp->qp_trans);
		return (IBT_QP_SRV_TYPE_INVALID);
	}
	if (ibt_qp->ch_current_state != IBT_STATE_RESET) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: "
		    "QP needs to be in RESET state: Chan QP State<%d>",
		    ibt_qp->ch_current_state);
		return (IBT_CHAN_STATE_INVALID);
	}

	/*
	 * Initialize the QP to the RTS state for UDs
	 * and INIT state for all other QPs.
	 */
	switch (modify_attrp->qp_trans) {
	case IBT_UD_SRV:

		/*
		 * Bring the QP to the RTS state.
		 */
		state = IBT_STATE_RESET;
		ibtl_qp_flow_control_enter();
		do {
			modify_attrp->qp_current_state = state;
			modify_flags = ibt_cep_next_state[state].modify_flags;
			modify_attrp->qp_state = state =
			    ibt_cep_next_state[state].next_state;

			IBTF_DPRINTF_L3(ibtf_qp, "ibt_initialize_qp: "
			    "modifying qp state to 0x%x", state);
			status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl,
			    ibc_qp_hdl, modify_flags, modify_attrp, NULL);
		} while ((state != IBT_STATE_RTS) && (status == IBT_SUCCESS));
		ibtl_qp_flow_control_exit();

		if (status == IBT_SUCCESS) {
			ibt_qp->ch_current_state = state;
			ibt_qp->ch_transport.ud.ud_port_num =
			    modify_attrp->qp_transport.ud.ud_port;
			ibt_qp->ch_transport.ud.ud_qkey =
			    modify_attrp->qp_transport.ud.ud_qkey;
		}
		break;
	case IBT_UC_SRV:
	case IBT_RD_SRV:
	case IBT_RC_SRV:

		/*
		 * Bring the QP to the INIT state.
		 */
		modify_attrp->qp_state = IBT_STATE_INIT;

		ibtl_qp_flow_control_enter();
		status = (hca_ops_p->ibc_modify_qp)(ibc_hca_hdl, ibc_qp_hdl,
		    IBT_CEP_SET_RESET_INIT, modify_attrp, NULL);
		ibtl_qp_flow_control_exit();
		if (status == IBT_SUCCESS)
			ibt_qp->ch_current_state = IBT_STATE_INIT;
		break;
	default:
		/* shouldn't happen ILLEGAL Type */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_initialize_qp: Illegal Type %d",
		    modify_attrp->qp_trans);
		return (IBT_QP_SRV_TYPE_INVALID);
	} /* End switch */

	return (status);
}

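/*
 * Usage sketch (illustrative only): to take a freshly allocated UD QP
 * from RESET all the way to RTS, a caller fills in the UD transport
 * attributes and lets the state loop above walk RESET->INIT->RTR->RTS
 * (the hypothetical "port", "pkey_ix" and "qkey" values come from the
 * caller):
 *
 *	ibt_qp_info_t	info;
 *
 *	bzero(&info, sizeof (info));
 *	info.qp_trans = IBT_UD_SRV;
 *	info.qp_transport.ud.ud_port = port;
 *	info.qp_transport.ud.ud_pkey_ix = pkey_ix;
 *	info.qp_transport.ud.ud_qkey = qkey;
 *	status = ibt_initialize_qp(qp, &info);
 *
 * For RC/UC QPs the same call only moves the channel to INIT; the CM
 * (or the client) later brings it to RTR/RTS via ibt_modify_qp().
 */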

/*
 * Function:
 *	ibt_alloc_special_qp
 * Input:
 *	hca_hdl		HCA Handle.
 *	port		HCA port on which to allocate the Special QP.
 *	type		Specifies the type of Special QP to be allocated.
 *	qp_attrp	Specifies the ibt_qp_alloc_attr_t that are needed to
 *			allocate a special QP.
 * Output:
 *	queue_sizes_p	Returned sizes for SQ, RQ, SQ WR SGL elements & RQ
 *			WR SGL elements.
 *	ibt_qp_p	The ibt_qp_hdl_t of the allocated QP.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Allocate a special QP with specified attributes.
 */
ibt_status_t
ibt_alloc_special_qp(ibt_hca_hdl_t hca_hdl, uint8_t port, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *qp_attrp, ibt_chan_sizes_t *queue_sizes_p,
    ibt_qp_hdl_t *ibt_qp_p)
{
	ibt_qp_hdl_t	chanp;
	ibt_status_t	retval;
	ibt_tran_srv_t	sqp_type;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_alloc_special_qp(%p, %d, %x, %p, %p, %p)",
	    hca_hdl, port, type, qp_attrp, queue_sizes_p, ibt_qp_p);

	switch (type) {
	case IBT_SMI_SQP:
	case IBT_GSI_SQP:
		sqp_type = IBT_UD_SRV;
		break;

	case IBT_RAWIP_SQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw IP "
		    "Transport Type is not supported.");
		*ibt_qp_p = NULL;
		return (IBT_NOT_SUPPORTED);

	case IBT_RAWETHER_SQP:
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: Raw Ethernet "
		    "Transport Type is not supported.");
		*ibt_qp_p = NULL;
		return (IBT_NOT_SUPPORTED);

	default:
		/* Shouldn't happen */
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
		    "Illegal Type 0x%x", type);
		*ibt_qp_p = NULL;
		return (IBT_QP_SPECIAL_TYPE_INVALID);
	}

	/* convert the CQ handles for the CI */
	qp_attrp->qp_ibc_scq_hdl = qp_attrp->qp_scq_hdl->cq_ibc_cq_hdl;
	qp_attrp->qp_ibc_rcq_hdl = qp_attrp->qp_rcq_hdl->cq_ibc_cq_hdl;

	/* Allocate Channel structure */
	chanp = kmem_zalloc(sizeof (*chanp), KM_SLEEP);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_special_qp)(
	    IBTL_HCA2CIHCA(hca_hdl), port, &chanp->ch_qp, type, qp_attrp,
	    queue_sizes_p, &chanp->ch_qp.qp_ibc_qp_hdl);
	ibtl_qp_flow_control_exit();
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_alloc_special_qp: "
		    "Failed to allocate Special QP: %d", retval);
		kmem_free(chanp, sizeof (*chanp));
		*ibt_qp_p = NULL;
		return (retval);
	}

	/* Initialize the internal QP struct. */
	chanp->ch_qp.qp_type = sqp_type;
	chanp->ch_qp.qp_hca = hca_hdl;
	chanp->ch_qp.qp_send_cq = qp_attrp->qp_scq_hdl;
	chanp->ch_qp.qp_recv_cq = qp_attrp->qp_rcq_hdl;
	chanp->ch_current_state = IBT_STATE_RESET;
	mutex_init(&chanp->ch_cm_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&chanp->ch_cm_cv, NULL, CV_DEFAULT, NULL);

	mutex_enter(&hca_hdl->ha_mutex);
	hca_hdl->ha_qp_cnt++;
	mutex_exit(&hca_hdl->ha_mutex);

	*ibt_qp_p = chanp;

	return (retval);
}


/*
 * Function:
 *	ibt_flush_qp
 * Input:
 *	ibt_qp		Handle for QP that needs to be flushed.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_QP_HDL_INVALID
 * Description:
 *	Put the QP into error state to flush out work requests.
 */
ibt_status_t
ibt_flush_qp(ibt_qp_hdl_t ibt_qp)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_flush_qp(%p)", ibt_qp);

	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
		mutex_enter(&ibtl_free_qp_mutex);
		if (ibt_qp->ch_transport.rc.rc_free_flags &
		    IBTL_RC_QP_CONNECTED) {
			mutex_exit(&ibtl_free_qp_mutex);
			IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp(%p): "
			    "called with a connected RC QP", ibt_qp);
			return (IBT_CHAN_STATE_INVALID);
		}
		mutex_exit(&ibtl_free_qp_mutex);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Set the QP state to error to flush any uncompleted WRs.
	 */
	modify_attr.qp_state = IBT_STATE_ERROR;
	modify_attr.qp_trans = ibt_qp->ch_qp.qp_type;

	retval = ibt_modify_qp(ibt_qp, IBT_CEP_SET_STATE, &modify_attr, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_flush_qp: "
		    "failed on chan %p: %d", ibt_qp, retval);
	}
	return (retval);
}

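/*
 * Usage sketch (illustrative only): when an unconnected channel is no
 * longer needed, it is typically flushed before being freed so that any
 * outstanding work requests complete with a flush error on the CQs:
 *
 *	(void) ibt_flush_qp(qp);
 *	... drain the send and receive CQs ...
 *	status = ibt_free_qp(qp);
 */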

/*
 * ibtl_cm_chan_is_open()
 *
 *	Inform IBTL that the connection has been established on this
 *	channel so that a later call to ibtl_cm_chan_is_closed()
 *	will be required to free the QPN used by this channel.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_open(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_open(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(chan->ch_transport.rc.rc_free_flags == 0);
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CONNECTED;
	mutex_exit(&ibtl_free_qp_mutex);
}

/*
 * ibtl_cm_is_chan_closing()
 *
 *	Returns 1 if the connection that has been started for this
 *	channel has moved to TIMEWAIT; if not, returns 0.
 *
 *	chan	Channel Handle
 */
int
ibtl_cm_is_chan_closing(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closing(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSING) {
		mutex_exit(&ibtl_free_qp_mutex);
		return (1);
	}
	mutex_exit(&ibtl_free_qp_mutex);
	return (0);
}

/*
 * ibtl_cm_is_chan_closed()
 *
 *	Returns 1 if the connection that has been started for this
 *	channel has completed TIMEWAIT; if not, returns 0.
 *
 *	chan	Channel Handle
 */
int
ibtl_cm_is_chan_closed(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_is_chan_closed(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	if (chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CLOSED) {
		mutex_exit(&ibtl_free_qp_mutex);
		return (1);
	}
	mutex_exit(&ibtl_free_qp_mutex);
	return (0);
}

/*
 * ibtl_cm_chan_is_closing()
 *
 *	Inform IBTL that the TIMEWAIT delay for the connection has been
 *	started for this channel so that the QP can be freed.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_closing(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closing(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(chan->ch_transport.rc.rc_free_flags == IBTL_RC_QP_CONNECTED);
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSING;
	mutex_exit(&ibtl_free_qp_mutex);
}

/*
 * ibtl_cm_chan_is_closed()
 *
 *	Inform IBTL that the TIMEWAIT delay for the connection has been
 *	reached for this channel so that the QPN can be reused.
 *
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_closed(ibt_channel_hdl_t chan)
{
	ibt_status_t status;
	ibtl_hca_t *ibtl_hca = chan->ch_qp.qp_hca;

	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_closed(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT((chan->ch_transport.rc.rc_free_flags &
	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING)) ==
	    (IBTL_RC_QP_CONNECTED | IBTL_RC_QP_CLOSING));

	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CONNECTED;
	chan->ch_transport.rc.rc_free_flags &= ~IBTL_RC_QP_CLOSING;
	chan->ch_transport.rc.rc_free_flags |= IBTL_RC_QP_CLOSED;

	ibtl_cm_set_chan_private(chan, NULL);

	if ((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_FREED) == 0) {
		mutex_exit(&ibtl_free_qp_mutex);
		return;
	}
	mutex_exit(&ibtl_free_qp_mutex);
	ibtl_qp_flow_control_enter();
	if ((status = (IBTL_CHAN2CIHCAOPS_P(chan)->ibc_release_qpn)
	    (IBTL_CHAN2CIHCA(chan), chan->ch_transport.rc.rc_qpn_hdl)) ==
	    IBT_SUCCESS) {
		/* effectively, this is kmem_free(chan); */
		ibtl_free_qp_async_check(&chan->ch_qp);

		/* decrement ha_qpn_cnt and check for close in progress */
		ibtl_close_hca_check(ibtl_hca);
	} else
		IBTF_DPRINTF_L2(ibtf_qp, "ibtl_cm_chan_is_closed: "
		    "ibc_release_qpn failed: status = %d\n", status);
	ibtl_qp_flow_control_exit();
}

/*
 * ibtl_cm_chan_is_reused()
 *
 *	Inform IBTL that the channel is going to be re-used
 *	chan	Channel Handle
 */
void
ibtl_cm_chan_is_reused(ibt_channel_hdl_t chan)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_cm_chan_is_reused(%p)", chan);
	ASSERT(chan->ch_qp.qp_type == IBT_RC_SRV);
	mutex_enter(&ibtl_free_qp_mutex);
	ASSERT(((chan->ch_transport.rc.rc_free_flags & IBTL_RC_QP_CONNECTED) !=
	    IBTL_RC_QP_CONNECTED));

	/* channel is no longer in closed state, shall be re-used */
	chan->ch_transport.rc.rc_free_flags = 0;

	mutex_exit(&ibtl_free_qp_mutex);
}

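/*
 * The ibtl_cm_chan_is_*() calls above are made by the CM to track the
 * rc_free_flags life cycle of an RC channel.  A sketch of the expected
 * ordering (illustrative only):
 *
 *	ibtl_cm_chan_is_open(chan);	   connection established
 *	...
 *	ibtl_cm_chan_is_closing(chan);	   TIMEWAIT started
 *	ibtl_cm_chan_is_closed(chan);	   TIMEWAIT done, QPN reusable
 *
 * or, if the connection is torn down and the same QP is connected again,
 * ibtl_cm_chan_is_reused() resets the flags before a new is_open() call.
 * ibt_free_qp() interacts with these flags: a connected RC QP cannot be
 * freed until the close has at least started.
 */
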
/*
 * Function:	ibt_free_qp()
 *
 * Input:	ibt_qp		Handle for Channel(QP) that needs to be freed.
 *
 * Output:	NONE.
 *
 * Returns:	IBT_SUCCESS
 *		IBT_QP_STATE_INVALID
 *		IBT_QP_HDL_INVALID
 *
 * Description:
 *		Free a previously allocated QP.
 */
ibt_status_t
ibt_free_qp(ibt_qp_hdl_t ibt_qp)
{
	ibt_status_t		status;
	ibtl_hca_t		*ibtl_hca = ibt_qp->ch_qp.qp_hca;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p)", ibt_qp);

	if (ibt_qp->ch_qp.qp_type == IBT_RC_SRV) {
		ibtl_qp_flow_control_enter();
		mutex_enter(&ibtl_free_qp_mutex);
		if (ibt_qp->ch_transport.rc.rc_free_flags &
		    IBTL_RC_QP_CONNECTED) {
			if ((ibt_qp->ch_transport.rc.rc_free_flags &
			    IBTL_RC_QP_CLOSING) == 0) {
				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: ERROR - "
				    "need to call ibt_close_rc_channel");
				mutex_exit(&ibtl_free_qp_mutex);
				ibtl_qp_flow_control_exit();
				return (IBT_CHAN_STATE_INVALID);
			}
			ibt_qp->ch_transport.rc.rc_free_flags |=
			    IBTL_RC_QP_FREED;
			status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
			    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
			    IBC_FREE_QP_ONLY,
			    &ibt_qp->ch_transport.rc.rc_qpn_hdl);
			mutex_exit(&ibtl_free_qp_mutex);
			ibtl_qp_flow_control_exit();

			if (status == IBT_SUCCESS) {
				mutex_enter(&ibtl_clnt_list_mutex);
				ibtl_hca->ha_qpn_cnt++;
				mutex_exit(&ibtl_clnt_list_mutex);
				mutex_enter(&ibtl_hca->ha_mutex);
				ibtl_hca->ha_qp_cnt--;
				mutex_exit(&ibtl_hca->ha_mutex);
				IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - "
				    "SUCCESS", ibt_qp);
			} else
				IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
				    "ibc_free_qp failed: status = %d", status);
			return (status);
		}
		mutex_exit(&ibtl_free_qp_mutex);
	} else
		ibtl_qp_flow_control_enter();

	status = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_free_qp)
	    (IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp),
	    IBC_FREE_QP_AND_QPN, NULL);
	ibtl_qp_flow_control_exit();

	if (status == IBT_SUCCESS) {
		/* effectively, this is kmem_free(ibt_qp); */
		ibtl_free_qp_async_check(&ibt_qp->ch_qp);

		mutex_enter(&ibtl_hca->ha_mutex);
		ibtl_hca->ha_qp_cnt--;
		mutex_exit(&ibtl_hca->ha_mutex);
		IBTF_DPRINTF_L3(ibtf_qp, "ibt_free_qp(%p) - SUCCESS", ibt_qp);
	} else {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_free_qp: "
		    "ibc_free_qp failed with error %d", status);
	}

	return (status);
}

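/*
 * Usage note (illustrative only): for a plain UD or unconnected channel
 * the tear-down order is simply
 *
 *	(void) ibt_flush_qp(qp);
 *	status = ibt_free_qp(qp);
 *
 * For a connected RC channel the CM close must run first (the
 * ibt_close_rc_channel() call mentioned in the error path above); the QP
 * itself is then freed here with IBC_FREE_QP_ONLY while the QPN is held
 * until the CM calls ibtl_cm_chan_is_closed() after TIMEWAIT.
 */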

/* helper function for ibt_query_qp */
static void
ibtl_fillin_sgid(ibt_cep_path_t *pathp, ibtl_hca_devinfo_t *hca_devp)
{
	uint8_t port;
	uint32_t sgid_ix;
	ib_gid_t *sgidp;

	port = pathp->cep_hca_port_num;
	sgid_ix = pathp->cep_adds_vect.av_sgid_ix;
	if (port == 0 || port > hca_devp->hd_hca_attr->hca_nports ||
	    sgid_ix >= IBTL_HDIP2SGIDTBLSZ(hca_devp)) {
		pathp->cep_adds_vect.av_sgid.gid_prefix = 0;
		pathp->cep_adds_vect.av_sgid.gid_guid = 0;
	} else {
		mutex_enter(&ibtl_clnt_list_mutex);
		sgidp = hca_devp->hd_portinfop[port-1].p_sgid_tbl;
		pathp->cep_adds_vect.av_sgid = sgidp[sgid_ix];
		mutex_exit(&ibtl_clnt_list_mutex);
	}
}


/*
 * Function:	ibt_query_qp
 *
 * Input:	ibt_qp 			- The IBT QP Handle.
 *
 * Output:	qp_query_attrp 		- Points to an ibt_qp_query_attr_t
 *					  that on return contains all the
 *					  attributes of the specified qp.
 *
 * Returns:	IBT_SUCCESS
 *		IBT_QP_HDL_INVALID
 *
 * Description:
 *		Query QP attributes.
 *
 */
ibt_status_t
ibt_query_qp(ibt_qp_hdl_t ibt_qp, ibt_qp_query_attr_t *qp_query_attrp)
{
	ibt_status_t		retval;
	ibtl_hca_devinfo_t	*hca_devp;
	ibt_qp_info_t		*qp_infop;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_qp(%p, %p)",
	    ibt_qp, qp_query_attrp);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_query_qp(
	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), qp_query_attrp));
	ibtl_qp_flow_control_exit();
	if (retval == IBT_SUCCESS) {
		ibt_qp->ch_current_state = qp_query_attrp->qp_info.qp_state;

		/* need to fill in sgid from port and sgid_ix for RC and UC */
		hca_devp = ibt_qp->ch_qp.qp_hca->ha_hca_devp;
		qp_infop = &qp_query_attrp->qp_info;

		switch (qp_infop->qp_trans) {
		case IBT_RC_SRV:
			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_path,
			    hca_devp);
			ibtl_fillin_sgid(&qp_infop->qp_transport.rc.rc_alt_path,
			    hca_devp);
			break;
		case IBT_UC_SRV:
			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_path,
			    hca_devp);
			ibtl_fillin_sgid(&qp_infop->qp_transport.uc.uc_alt_path,
			    hca_devp);
			break;
		}
	} else {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_qp: "
		    "failed on chan %p: %d", ibt_qp, retval);
	}

	return (retval);
}

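/*
 * Usage sketch (illustrative only): besides returning the full set of QP
 * attributes, a successful ibt_query_qp() refreshes the cached channel
 * state, e.g.
 *
 *	ibt_qp_query_attr_t	attr;
 *
 *	if (ibt_query_qp(chan, &attr) == IBT_SUCCESS)
 *		current_state = attr.qp_info.qp_state;
 *
 * ibt_modify_qp() and ibt_migrate_path() below use exactly this to
 * recover when ch_current_state has gone stale.
 */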

/*
 * Function:
 *	ibt_modify_qp
 * Input:
 *	ibt_qp		The IBT QP Handle.
 *	flags		Specifies which attributes in ibt_qp_info_t
 *			are to be modified.
 *	modify_attrp	Points to an ibt_qp_info_t struct that contains all
 *			the attributes of the specified QP that a client is
 *			allowed to modify after a QP has been allocated.
 * Output:
 *	actual_sz	Returned actual queue sizes.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Modify the attributes of an existing QP.
 */
ibt_status_t
ibt_modify_qp(ibt_qp_hdl_t ibt_qp, ibt_cep_modify_flags_t flags,
    ibt_qp_info_t *modify_attrp, ibt_queue_sizes_t *actual_sz)
{
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_qp(%p, %d, %p, %p)",
	    ibt_qp, flags, modify_attrp, actual_sz);

	ibtl_qp_flow_control_enter();
	retval = (IBTL_CHAN2CIHCAOPS_P(ibt_qp)->ibc_modify_qp)(
	    IBTL_CHAN2CIHCA(ibt_qp), IBTL_CHAN2CIQP(ibt_qp), flags,
	    modify_attrp, actual_sz);
	ibtl_qp_flow_control_exit();
	if (retval == IBT_SUCCESS) {
		ibt_qp->ch_current_state = modify_attrp->qp_state;
		if (ibt_qp->ch_qp.qp_type == IBT_UD_SRV) {
			if (flags & (IBT_CEP_SET_PORT | IBT_CEP_SET_RESET_INIT))
				ibt_qp->ch_transport.ud.ud_port_num =
				    modify_attrp->qp_transport.ud.ud_port;
			if (flags & (IBT_CEP_SET_QKEY | IBT_CEP_SET_RESET_INIT))
				ibt_qp->ch_transport.ud.ud_qkey =
				    modify_attrp->qp_transport.ud.ud_qkey;
		}
	} else {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_qp: failed on chan %p: %d",
		    ibt_qp, retval);

		if (retval == IBT_CHAN_STATE_INVALID) {
			/* That means our cache had invalid QP state value. */
			ibt_qp_query_attr_t	qp_attr;

			/* Query the channel (QP) */
			if (ibt_query_qp(ibt_qp, &qp_attr) == IBT_SUCCESS)
				ibt_qp->ch_current_state =
				    qp_attr.qp_info.qp_state;
		}
	}
	return (retval);
}

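/*
 * Usage sketch (illustrative only): ibt_modify_qp() is the workhorse
 * behind most of the wrappers in this file.  For example, transitioning
 * an RC channel from INIT to RTR would look roughly like:
 *
 *	ibt_qp_info_t	info;
 *
 *	bzero(&info, sizeof (info));
 *	info.qp_trans = IBT_RC_SRV;
 *	info.qp_current_state = IBT_STATE_INIT;
 *	info.qp_state = IBT_STATE_RTR;
 *	... fill in info.qp_transport.rc (path, rdma resources, etc.) ...
 *	status = ibt_modify_qp(qp, IBT_CEP_SET_INIT_RTR, &info, NULL);
 *
 * Note that on success the cached ch_current_state is updated from
 * qp_state, and on IBT_CHAN_STATE_INVALID it is refreshed via
 * ibt_query_qp().
 */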

/*
 * Function:
 *	ibt_migrate_path
 * Input:
 *	rc_chan		A previously allocated RC channel handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS on success, else an appropriate error.
 * Description:
 *	Force the CI to use the alternate path. The alternate path becomes
 *	the primary path. A new alternate path should be loaded and enabled.
 *	Assumes that the given channel is in the RTS or SQD state.
 */
ibt_status_t
ibt_migrate_path(ibt_channel_hdl_t rc_chan)
{
	ibt_status_t		retval;
	ibt_qp_info_t		qp_info;
	ibt_qp_query_attr_t	qp_attr;
	ibt_cep_modify_flags_t	cep_flags;
	int			retries = 1;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_migrate_path: channel %p", rc_chan);

	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path: "
		    "Invalid Channel type: Applicable only to RC Channel");
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	if (rc_chan->ch_current_state != IBT_STATE_RTS &&
	    rc_chan->ch_current_state != IBT_STATE_SQD) {
		if (ibt_query_qp(rc_chan, &qp_attr) == IBT_SUCCESS) {
			/* ch_current_state is fixed by ibt_query_qp */
			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
			    rc_chan->ch_current_state != IBT_STATE_SQD)
				return (IBT_CHAN_STATE_INVALID);
			retries = 0;
		} else /* query_qp should never really fail */
			return (IBT_CHAN_STATE_INVALID);
	}

retry:
	/* Call modify_qp */
	cep_flags = IBT_CEP_SET_MIG | IBT_CEP_SET_STATE;
	qp_info.qp_state = rc_chan->ch_current_state;
	qp_info.qp_current_state = rc_chan->ch_current_state;
	qp_info.qp_trans = IBT_RC_SRV;
	qp_info.qp_transport.rc.rc_mig_state = IBT_STATE_MIGRATED;
	retval = ibt_modify_qp(rc_chan, cep_flags, &qp_info, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
		    " ibt_modify_qp() returned = %d", retval);
		if (rc_chan->ch_current_state != qp_info.qp_state &&
		    --retries >= 0) {
			/*
			 * That means our cached 'state' was invalid.
			 * We know ibt_modify_qp() fixed it up, so it
			 * might be worth retrying.
			 */
			if (rc_chan->ch_current_state != IBT_STATE_RTS &&
			    rc_chan->ch_current_state != IBT_STATE_SQD)
				return (IBT_CHAN_STATE_INVALID);
			IBTF_DPRINTF_L2(ibtf_qp, "ibt_migrate_path:"
			    " retrying after 'state' fixed");
			goto retry;
		}
	}
	return (retval);
}

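/*
 * Usage note (illustrative only): after a successful ibt_migrate_path()
 * the alternate path has become the primary path, so a client typically
 * loads a fresh alternate path right away, e.g. via ibt_modify_qp() with
 * the alternate-path modify flag (assumed here to be IBT_CEP_SET_ALT_PATH)
 * and a newly resolved qp_transport.rc.rc_alt_path, before relying on
 * automatic path migration again.
 */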

/*
 * Function:
 *	ibt_set_qp_private
 * Input:
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 *	clnt_private	The client private data.
 * Output:
 *	none.
 * Returns:
 *	none.
 * Description:
 *	Set the client private data.
 */
void
ibt_set_qp_private(ibt_qp_hdl_t ibt_qp, void *clnt_private)
{
	ibt_qp->ch_clnt_private = clnt_private;
}


/*
 * Function:
 *	ibt_get_qp_private
 * Input:
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 * Output:
 *	none.
 * Returns:
 *	The client private data.
 * Description:
 *	Get the client private data.
 */
void *
ibt_get_qp_private(ibt_qp_hdl_t ibt_qp)
{
	return (ibt_qp->ch_clnt_private);
}


/*
 * Function:
 *	ibt_qp_to_hca_guid
 * Input:
 *	ibt_qp		The ibt_qp_hdl_t of the allocated QP.
 * Output:
 *	none.
 * Returns:
 *	hca_guid	Returned HCA GUID on which the specified QP is
 *			allocated. Valid if it is non-NULL on return.
 * Description:
 *	A helper function to retrieve HCA GUID for the specified QP.
 */
ib_guid_t
ibt_qp_to_hca_guid(ibt_qp_hdl_t ibt_qp)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibt_qp_to_hca_guid(%p)", ibt_qp);

	return (IBTL_HCA2HCAGUID(IBTL_CHAN2HCA(ibt_qp)));
}


/*
 * Function:
 *	ibt_recover_ud_qp
 * Input:
 *	ibt_qp		A UD QP Handle which is in the SQ Error state.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_QP_SRV_TYPE_INVALID
 *	IBT_QP_STATE_INVALID.
 * Description:
 *	Recover a UD QP which has transitioned to the SQ Error state. The
 *	ibt_recover_ud_qp() transitions the QP from the SQ Error state to
 *	the Ready-To-Send QP state.
 *
 *	If a work request posted to a UD QP's send queue completes with an
 *	error (see ibt_wc_status_t), the QP gets transitioned to the SQ Error
 *	state. In order to reuse this QP, ibt_recover_ud_qp() can be used to
 *	recover the QP to a usable (Ready-to-Send) state.
 */
ibt_status_t
ibt_recover_ud_qp(ibt_qp_hdl_t ibt_qp)
{
	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recover_ud_qp(%p)", ibt_qp);

	return (ibt_recover_ud_channel(IBTL_QP2CHAN(ibt_qp)));
}


/*
 * Function:
 *	ibt_recycle_ud
 * Input:
 *	ud_chan		The IBT UD QP Handle.
 *	hca_port_num	HCA port to use when re-initializing the QP.
 *	pkey_ix		P_Key index to use when re-initializing the QP.
 *	qkey		Q_Key to use when re-initializing the QP.
 * Output:
 *	none
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_SRV_TYPE_INVALID
 *	IBT_CHAN_STATE_INVALID
 *
 * Description:
 *	Revert the UD QP back to a usable state.
 */
ibt_status_t
ibt_recycle_ud(ibt_channel_hdl_t ud_chan, uint8_t hca_port_num,
    uint16_t pkey_ix, ib_qkey_t qkey)
{
	ibt_qp_query_attr_t	qp_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_recycle_ud(%p, %d, %x, %x): ",
	    ud_chan, hca_port_num, pkey_ix, qkey);

	if (ud_chan->ch_qp.qp_type != IBT_UD_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "chan %p is not a UD channel", ud_chan);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	retval = ibt_query_qp(ud_chan, &qp_attr);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_query_qp failed on chan %p: %d", ud_chan, retval);
		return (retval);
	}
	if (qp_attr.qp_info.qp_state != IBT_STATE_ERROR) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "chan %p is in state %d (not in ERROR state)",
		    ud_chan, qp_attr.qp_info.qp_state);
		ud_chan->ch_current_state = qp_attr.qp_info.qp_state;
		return (IBT_CHAN_STATE_INVALID);
	}

	/* transition the QP from ERROR to RESET */
	qp_attr.qp_info.qp_state = IBT_STATE_RESET;
	qp_attr.qp_info.qp_trans = ud_chan->ch_qp.qp_type;
	retval = ibt_modify_qp(ud_chan, IBT_CEP_SET_STATE, &qp_attr.qp_info,
	    NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_modify_qp(ERROR=>RESET) failed on chan %p: %d",
		    ud_chan, retval);
		return (retval);
	}
	ud_chan->ch_current_state = IBT_STATE_RESET;

	/* transition the QP back to RTS */
	qp_attr.qp_info.qp_transport.ud.ud_port = hca_port_num;
	qp_attr.qp_info.qp_transport.ud.ud_qkey = qkey;
	qp_attr.qp_info.qp_transport.ud.ud_pkey_ix = pkey_ix;
	retval = ibt_initialize_qp(ud_chan, &qp_attr.qp_info);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_recycle_ud: "
		    "ibt_initialize_qp failed on chan %p: %d", ud_chan, retval);
		/* the man page says the QP should be left in ERROR state */
		(void) ibt_flush_qp(ud_chan);
	}
	return (retval);
}

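/*
 * Usage sketch (illustrative only): a client whose UD channel has gone
 * into the ERROR state (e.g. after a completion error) can put it back
 * into service without reallocating it:
 *
 *	if (ibt_recycle_ud(ud_chan, port, pkey_ix, qkey) == IBT_SUCCESS)
 *		... repost receive buffers and resume sending ...
 *
 * The port, pkey_ix and qkey arguments are the caller's values; they are
 * applied while re-initializing the QP to RTS.
 */
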
/*
 * Function:
 *	ibt_pause_sendq
 * Input:
 *	chan		The IBT QP Handle.
 *	modify_flags	IBT_CEP_SET_NOTHING or IBT_CEP_SET_SQD_EVENT
 *
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_STATE_INVALID
 *	IBT_INVALID_PARAM
 *
 * Description:
 *	Place the send queue of the specified channel into the send queue
 *	drained (SQD) state.
 *
 */
ibt_status_t
ibt_pause_sendq(ibt_channel_hdl_t chan, ibt_cep_modify_flags_t modify_flags)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_pause_sendq(%p, %x)", chan, modify_flags);

	modify_flags &= IBT_CEP_SET_SQD_EVENT;	/* ignore other bits */
	modify_flags |= IBT_CEP_SET_STATE;

	bzero(&modify_attr, sizeof (ibt_qp_info_t));
	/*
	 * Set the QP state to SQD.
	 */
	modify_attr.qp_state = IBT_STATE_SQD;
	modify_attr.qp_trans = chan->ch_qp.qp_type;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);

	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_pause_sendq: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}


/*
 * Function:
 *	ibt_unpause_sendq
 * Input:
 *	chan	The IBT Channel Handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_STATE_INVALID
 * Description:
 *	Un-pauses the previously paused channel. This call will transition the
 *	QP from SQD to RTS state.
 */
ibt_status_t
ibt_unpause_sendq(ibt_channel_hdl_t chan)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_unpause_sendq(%p)", chan);

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Set the QP state to RTS.
	 */
	modify_attr.qp_current_state = IBT_STATE_SQD;
	modify_attr.qp_state = IBT_STATE_RTS;
	modify_attr.qp_trans = chan->ch_qp.qp_type;

	retval = ibt_modify_qp(chan, IBT_CEP_SET_STATE, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_unpause_sendq: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}

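/*
 * Usage sketch (illustrative only): pause/unpause bracket modifications
 * that require a drained send queue, e.g. ibt_set_rdma_resource() below:
 *
 *	(void) ibt_pause_sendq(chan, IBT_CEP_SET_SQD_EVENT);
 *	... wait for the send queue to drain ...
 *	status = ibt_set_rdma_resource(chan, IBT_CEP_SET_RDMARA_OUT |
 *	    IBT_CEP_SET_RDMARA_IN, new_ra_out, new_ra_in);
 *
 * Per its description below, ibt_set_rdma_resource() itself un-pauses
 * the channel; otherwise ibt_unpause_sendq() returns it to RTS.
 */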

/*
 * Function:
 *	ibt_resize_queues
 * Input:
 *	chan		A previously allocated channel handle.
 *	flags		QP Flags
 *				IBT_SEND_Q
 *				IBT_RECV_Q
 *	request_sz	Requested new sizes.
 * Output:
 *	actual_sz	Returned actual sizes.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Resize the SendQ/RecvQ sizes of a channel. Can only be called on
 *	a previously opened channel.
 */
ibt_status_t
ibt_resize_queues(ibt_channel_hdl_t chan, ibt_qflags_t flags,
    ibt_queue_sizes_t *request_sz, ibt_queue_sizes_t *actual_sz)
{
	ibt_cep_modify_flags_t	modify_flags = IBT_CEP_SET_STATE;
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_resize_queues(%p, 0x%X, %p, %p)",
	    chan, flags, request_sz, actual_sz);

	if ((flags & (IBT_SEND_Q | IBT_RECV_Q)) == 0)  {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
		    "Flags <0x%X> not set", flags);
		return (IBT_INVALID_PARAM);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_current_state = chan->ch_current_state;
	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_state = chan->ch_current_state;

	if (flags & IBT_SEND_Q) {
		modify_attr.qp_sq_sz = request_sz->qs_sq;
		modify_flags |= IBT_CEP_SET_SQ_SIZE;
	}

	if (flags & IBT_RECV_Q) {
		modify_attr.qp_rq_sz = request_sz->qs_rq;
		modify_flags |= IBT_CEP_SET_RQ_SIZE;
	}

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, actual_sz);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_resize_queues: "
		    "failed on QP %p: %d", chan, retval);
	}

	return (retval);
}

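/*
 * Usage sketch (illustrative only): growing just the send queue while
 * leaving the receive queue alone, and learning what the CI actually
 * granted (the requested depth of 256 is an arbitrary example):
 *
 *	ibt_queue_sizes_t	req, act;
 *
 *	req.qs_sq = 256;
 *	status = ibt_resize_queues(chan, IBT_SEND_Q, &req, &act);
 *	if (status == IBT_SUCCESS)
 *		... act.qs_sq holds the actual send queue size ...
 */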

/*
 * Function:
 *	ibt_query_queues
 * Input:
 *	chan		A previously allocated channel handle.
 * Output:
 *	actual_sz	Returned actual sizes.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Query the SendQ/RecvQ sizes of a channel.
 */
ibt_status_t
ibt_query_queues(ibt_channel_hdl_t chan, ibt_queue_sizes_t *actual_sz)
{
	ibt_status_t		retval;
	ibt_qp_query_attr_t	qp_query_attr;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_query_queues(%p)", chan);

	/* Perform Query QP and retrieve QP sizes. */
	retval = ibt_query_qp(chan, &qp_query_attr);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_query_queues: "
		    "ibt_query_qp failed: qp %p: %d", chan, retval);
		return (retval);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
	    actual_sz->qs_sq))
	actual_sz->qs_sq = qp_query_attr.qp_info.qp_sq_sz;
	actual_sz->qs_rq = qp_query_attr.qp_info.qp_rq_sz;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(actual_sz->qs_rq,
	    actual_sz->qs_sq))
	chan->ch_current_state = qp_query_attr.qp_info.qp_state;

	return (retval);
}


/*
 * Function:
 *	ibt_modify_rdma
 * Input:
 *	rc_chan		A previously allocated channel handle.
 *
 *	modify_flags	Bitwise "or" of any of the following:
 *			IBT_CEP_SET_RDMA_R	Enable/Disable RDMA RD
 *			IBT_CEP_SET_RDMA_W	Enable/Disable RDMA WR
 *			IBT_CEP_SET_ATOMIC	Enable/Disable Atomics
 *
 *	flags		Channel End Point (CEP) Disable Flags (0 => enable).
 *			IBT_CEP_NO_RDMA_RD	Disable incoming RDMA RD's
 *			IBT_CEP_NO_RDMA_WR	Disable incoming RDMA WR's
 *			IBT_CEP_NO_ATOMIC	Disable incoming Atomics.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_QP_SRV_TYPE_INVALID
 *	IBT_CHAN_HDL_INVALID
 *	IBT_CHAN_ATOMICS_NOT_SUPPORTED
 *	IBT_CHAN_STATE_INVALID
 * Description:
 *	Enable/disable RDMA operations. To enable an operation clear the
 *	"disable" flag. Can call this function when the channel is in
 *	INIT, RTS or SQD states. If called in any other state
 *	IBT_CHAN_STATE_INVALID is returned. When the operation completes the
 *	channel state is left unchanged.
 */
ibt_status_t
ibt_modify_rdma(ibt_channel_hdl_t rc_chan,
    ibt_cep_modify_flags_t modify_flags, ibt_cep_flags_t flags)
{
	ibt_status_t		retval;
	ibt_qp_info_t		modify_attr;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_modify_rdma(%p, 0x%x, 0x%x)",
	    rc_chan, modify_flags, flags);

	if (rc_chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    rc_chan->ch_qp.qp_type);
		return (IBT_QP_SRV_TYPE_INVALID);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	/*
	 * Can only call this function when the channel is in INIT, RTS or SQD
	 * states.
	 */
	if ((rc_chan->ch_current_state != IBT_STATE_INIT) &&
	    (rc_chan->ch_current_state != IBT_STATE_RTS) &&
	    (rc_chan->ch_current_state != IBT_STATE_SQD)) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: Invalid Channel "
		    "state: 0x%X", rc_chan->ch_current_state);
		return (IBT_CHAN_STATE_INVALID);
	}

	modify_attr.qp_state = modify_attr.qp_current_state =
	    rc_chan->ch_current_state;
	modify_attr.qp_trans = rc_chan->ch_qp.qp_type;
	modify_attr.qp_flags = flags;

	modify_flags &= (IBT_CEP_SET_RDMA_R | IBT_CEP_SET_RDMA_W |
	    IBT_CEP_SET_ATOMIC);
	modify_flags |= IBT_CEP_SET_STATE;

	retval = ibt_modify_qp(rc_chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_modify_rdma: "
		    "failed on chan %p: %d", rc_chan, retval);
	}
	return (retval);
}

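/*
 * Usage sketch (illustrative only): to stop accepting incoming RDMA
 * writes on an established RC channel while leaving reads enabled:
 *
 *	status = ibt_modify_rdma(rc_chan, IBT_CEP_SET_RDMA_W,
 *	    IBT_CEP_NO_RDMA_WR);
 *
 * Passing 0 for the flags argument with the same modify_flags would
 * re-enable incoming RDMA writes (0 => enable, per the description above).
 */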

/*
 * Function:
 *	ibt_set_rdma_resource
 * Input:
 *	chan		A previously allocated RC channel handle.
 *	modify_flags	Bitwise "or" of any of the following:
 *			IBT_CEP_SET_RDMARA_OUT	Initiator depth (rdma_ra_out)
 *			IBT_CEP_SET_RDMARA_IN	Responder Resources
 *						(rdma_ra_in)
 *	rdma_ra_out	Outgoing RDMA Reads/Atomics
 *	rdma_ra_in	Incoming RDMA Reads/Atomics
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Change the number of resources to be used for incoming and outgoing
 *	RDMA reads & Atomics. Can only be called on a previously opened
 *	RC channel.  Can only be called on a paused channel, and this will
 *	un-pause that channel.
 */
ibt_status_t
ibt_set_rdma_resource(ibt_channel_hdl_t chan,
    ibt_cep_modify_flags_t modify_flags, uint8_t rdma_ra_out,
    uint8_t resp_rdma_ra_out)
{
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_set_rdma_resource(%p, 0x%x, %d, %d)",
	    chan, modify_flags, rdma_ra_out, resp_rdma_ra_out);

	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    chan->ch_qp.qp_type);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}

	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_state = IBT_STATE_SQD;

	modify_attr.qp_transport.rc.rc_rdma_ra_out = rdma_ra_out;
	modify_attr.qp_transport.rc.rc_rdma_ra_in = resp_rdma_ra_out;
	modify_flags &= (IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN);
	modify_flags |= IBT_CEP_SET_STATE;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_set_rdma_resource: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}

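/*
 * Usage sketch (illustrative only): renegotiating the RDMA read/atomic
 * depths on a paused RC channel (new_ra_out and new_ra_in are caller-
 * chosen values):
 *
 *	(void) ibt_pause_sendq(chan, IBT_CEP_SET_NOTHING);
 *	status = ibt_set_rdma_resource(chan,
 *	    IBT_CEP_SET_RDMARA_OUT | IBT_CEP_SET_RDMARA_IN,
 *	    new_ra_out, new_ra_in);
 *
 * Note that the second value (the parameter named resp_rdma_ra_out above)
 * is applied as the incoming responder resources, rc_rdma_ra_in.
 */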

/*
 * Function:
 *	ibt_change_port
 * Input:
 *	rc_chan		A previously allocated RC channel handle.
 *	port_num	New HCA port.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Change the primary physical port of a channel. (This is done only
 *	if the HCA supports this capability.)
 */
ibt_status_t
ibt_change_port(ibt_channel_hdl_t chan, uint8_t port_num)
{
	ibt_cep_modify_flags_t	modify_flags;
	ibt_qp_info_t		modify_attr;
	ibt_status_t		retval;

	IBTF_DPRINTF_L3(ibtf_qp, "ibt_change_port(%p, %d)", chan, port_num);

	if (chan->ch_qp.qp_type != IBT_RC_SRV) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
		    "Invalid Channel type: 0x%X, Applicable only to RC Channel",
		    chan->ch_qp.qp_type);
		return (IBT_CHAN_SRV_TYPE_INVALID);
	}
	bzero(&modify_attr, sizeof (ibt_qp_info_t));

	modify_attr.qp_state = IBT_STATE_SQD;
	modify_attr.qp_trans = chan->ch_qp.qp_type;
	modify_attr.qp_transport.rc.rc_path.cep_hca_port_num = port_num;

	modify_flags = IBT_CEP_SET_STATE | IBT_CEP_SET_PORT;

	retval = ibt_modify_qp(chan, modify_flags, &modify_attr, NULL);
	if (retval != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtf_qp, "ibt_change_port: "
		    "failed on chan %p: %d", chan, retval);
	}
	return (retval);
}


void
ibtl_init_cep_states(void)
{
	int	index;
	int	ibt_nstate_inits;

	IBTF_DPRINTF_L3(ibtf_qp, "ibtl_init_cep_states()");

	ibt_nstate_inits = sizeof (ibt_cep_next_state_inits) /
	    sizeof (ibt_cep_next_state_inits[0]);

	/*
	 * Initialize CEP next state table, using an indirect lookup table so
	 * that this code isn't dependent on the ibt_cep_state_t enum values.
	 */
	for (index = 0; index < ibt_nstate_inits; index++) {
		ibt_cep_state_t	state;

		state = ibt_cep_next_state_inits[index].current_state;

		ibt_cep_next_state[state].next_state =
		    ibt_cep_next_state_inits[index].next_state;

		ibt_cep_next_state[state].modify_flags =
		    ibt_cep_next_state_inits[index].modify_flags;
	}
}