xref: /titanic_50/usr/src/uts/common/io/idm/idm_impl.c (revision 0c19630b1592aa30d3e4d9db1a2a8cf9a91c0e72)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/conf.h>
28 #include <sys/file.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/cpuvar.h>
32 #include <sys/sdt.h>
33 
34 #include <sys/socket.h>
35 #include <sys/strsubr.h>
36 #include <sys/socketvar.h>
37 #include <sys/sysmacros.h>
38 
39 #include <sys/idm/idm.h>
40 #include <sys/idm/idm_so.h>
41 
42 extern idm_transport_t  idm_transport_list[];
43 
44 void
45 idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
46 {
47 	iscsi_async_evt_hdr_t *async_evt;
48 
49 	/*
50 	 * If we are in full-feature mode then route SCSI-related
51 	 * commands to the appropriate function vector
52 	 */
53 	ic->ic_timestamp = ddi_get_lbolt();
54 	mutex_enter(&ic->ic_state_mutex);
55 	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
56 		mutex_exit(&ic->ic_state_mutex);
57 
58 		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
59 			/* Forwarded SCSI-related commands */
60 			return;
61 		}
62 		mutex_enter(&ic->ic_state_mutex);
63 	}
64 
65 	/*
66 	 * If we get here with a SCSI-related PDU then we are not in
67 	 * full-feature mode and the PDU is a protocol error (SCSI command
68 	 * PDU's may sometimes be an exception, see below).  All
69 	 * non-SCSI PDU's get treated the same regardless of whether
70 	 * we are in full-feature mode.
71 	 *
72 	 * Look at the opcode and in some cases the PDU status and
73 	 * determine the appropriate event to send to the connection
74 	 * state machine.  Generate the event, passing the PDU as data.
75 	 * If the current connection state allows reception of the event
76 	 * the PDU will be submitted to the IDM client for processing,
77 	 * otherwise the PDU will be dropped.
78 	 */
79 	switch (IDM_PDU_OPCODE(pdu)) {
80 	case ISCSI_OP_LOGIN_CMD:
81 		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
82 		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
83 		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
84 		break;
85 	case ISCSI_OP_LOGIN_RSP:
86 		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
87 		break;
88 	case ISCSI_OP_LOGOUT_CMD:
89 		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
90 		    iscsi_logout_hdr_t *,
91 		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
92 		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
93 		break;
94 	case ISCSI_OP_LOGOUT_RSP:
95 		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
96 		break;
97 	case ISCSI_OP_ASYNC_EVENT:
98 		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
99 		switch (async_evt->async_event) {
100 		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
101 			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
102 			    (uintptr_t)pdu);
103 			break;
104 		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
105 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
106 			    (uintptr_t)pdu);
107 			break;
108 		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
109 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
110 			    (uintptr_t)pdu);
111 			break;
112 		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
113 		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
114 		default:
115 			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
116 			    (uintptr_t)pdu);
117 			break;
118 		}
119 		break;
120 	case ISCSI_OP_SCSI_CMD:
121 		/*
122 		 * Consider this scenario:  We are a target connection
123 		 * in "in login" state and a "login success sent" event has
124 		 * been generated but not yet handled.  Since we've sent
125 		 * the login response but we haven't actually transitioned
126 		 * to FFP mode we might conceivably receive a SCSI command
127 		 * from the initiator before we are ready.  We are actually
128 		 * in FFP, we just don't know it yet -- to address this we
129 		 * can generate an event corresponding to the SCSI command.
130 		 * At the point when the event is handled by the state
131 		 * machine the login request will have been handled and we
132 		 * should be in FFP.  If we are not in FFP by that time
133 		 * we can reject the SCSI command with a protocol error.
134 		 *
135 		 * This scenario only applies to the target.
136 		 *
137 		 * Handle dtrace probe in iscsit so we can find all the
138 		 * pieces of the CDB
139 		 */
140 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
141 		break;
142 	case ISCSI_OP_SCSI_DATA:
143 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
144 		    iscsi_data_hdr_t *,
145 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
146 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
147 		break;
148 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
149 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
150 		    iscsi_scsi_task_mgt_hdr_t *,
151 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
152 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
153 		break;
154 	case ISCSI_OP_NOOP_OUT:
155 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
156 		    iscsi_nop_out_hdr_t *,
157 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
158 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
159 		break;
160 	case ISCSI_OP_TEXT_CMD:
161 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
162 		    iscsi_text_hdr_t *,
163 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
164 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
165 		break;
166 	/* Initiator PDU's */
167 	case ISCSI_OP_SCSI_DATA_RSP:
168 	case ISCSI_OP_RTT_RSP:
169 	case ISCSI_OP_SNACK_CMD:
170 	case ISCSI_OP_NOOP_IN:
171 	case ISCSI_OP_TEXT_RSP:
172 	case ISCSI_OP_REJECT_MSG:
173 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
174 		/* Validate received PDU against current state */
175 		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
176 		    (uintptr_t)pdu);
177 		break;
178 	}
179 	mutex_exit(&ic->ic_state_mutex);
180 }
181 
182 void
183 idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
184 {
185 	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
186 }
187 
188 boolean_t
189 idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
190 {
191 	/*
192 	 * If this is an FFP request, call the appropriate handler
193 	 * and return B_TRUE, otherwise return B_FALSE.
194 	 */
195 	switch (IDM_PDU_OPCODE(pdu)) {
196 	case ISCSI_OP_SCSI_CMD:
197 		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
198 		return (B_TRUE);
199 	case ISCSI_OP_SCSI_DATA:
200 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
201 		    iscsi_data_hdr_t *,
202 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
203 		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
204 		return (B_TRUE);
205 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
206 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
207 		    iscsi_scsi_task_mgt_hdr_t *,
208 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
209 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
210 		return (B_TRUE);
211 	case ISCSI_OP_NOOP_OUT:
212 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
213 		    iscsi_nop_out_hdr_t *,
214 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
215 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
216 		return (B_TRUE);
217 	case ISCSI_OP_TEXT_CMD:
218 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
219 		    iscsi_text_hdr_t *,
220 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
221 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
222 		return (B_TRUE);
223 		/* Initiator only */
224 	case ISCSI_OP_SCSI_RSP:
225 		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
226 		return (B_TRUE);
227 	case ISCSI_OP_SCSI_DATA_RSP:
228 		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
229 		return (B_TRUE);
230 	case ISCSI_OP_RTT_RSP:
231 		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
232 		return (B_TRUE);
233 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
234 	case ISCSI_OP_TEXT_RSP:
235 	case ISCSI_OP_NOOP_IN:
236 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
237 		return (B_TRUE);
238 	default:
239 		return (B_FALSE);
240 	}
241 	/*NOTREACHED*/
242 }
243 
244 void
245 idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
246 {
247 	/*
248 	 * Some PDU's specific to FFP get special handling.  This function
249 	 * will normally never be called in FFP with an FFP PDU since this
250 	 * is a slow path, but it can happen on the target side during
251 	 * the transition to FFP.  We primarily call
252 	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
253 	 */
254 	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
255 		/*
256 		 * Non-FFP PDU, use the generic RX handler
257 		 */
258 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
259 	}
260 }
261 
262 void
263 idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
264 {
265 	iscsi_login_rsp_hdr_t	*login_rsp =
266 	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
267 	idm_conn_event_t	new_event;
268 
269 	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
270 		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
271 		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
272 		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
273 		    ISCSI_FULL_FEATURE_PHASE)) {
274 			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
275 			    CE_LOGIN_SUCCESS_SND);
276 		} else {
277 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
278 		}
279 	} else {
280 		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
281 	}
282 
283 	if (rx) {
284 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
285 	} else {
286 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
287 	}
288 }
289 
290 
291 void
292 idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
293 {
294 	iscsi_logout_hdr_t 	*logout_req =
295 	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
296 	idm_conn_event_t	new_event;
297 	uint8_t			reason =
298 	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);
299 
300 	/*
301 	 *	For a normal logout (close connection or close session) IDM
302 	 *	will terminate processing of all tasks, completing the tasks
303 	 *	back to the client with a status indicating the connection
304 	 *	was logged out.  These tasks are not retried.
305 	 *
306 	 *	For a "close connection for recovery" logout, IDM suspends
307 	 *	processing of all tasks and completes them back to the client
308 	 *	with a status indicating the connection was logged out for
309 	 *	recovery.  Both initiator and target hang onto these tasks.
310 	 *	When we add ERL2 support IDM will need to provide mechanisms
311 	 *	to change the task and buffer associations to a new connection.
312 	 *
313 	 *	This code doesn't address the possibility of MC/S.  We'll
314 	 *	need to decide how the separate connections get handled
315 	 *	in that case.  One simple option is to make the client
316 	 *	generate the events for the other connections.
317 	 */
318 	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
319 		new_event =
320 		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
321 	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
322 	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
323 		/* Check logout CID against this connection's CID */
324 		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
325 			/* Logout is for this connection */
326 			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
327 			    CE_LOGOUT_THIS_CONN_SND);
328 		} else {
329 			/*
330 			 * Logout affects another connection.  This is not
331 			 * a relevant event for this connection so we'll
332 			 * just treat it as a normal PDU event.  Client
333 			 * will need to lookup the other connection and
334 			 * generate the event.
335 			 */
336 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
337 		}
338 	} else {
339 		/* Invalid reason code */
340 		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
341 	}
342 
343 	if (rx) {
344 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
345 	} else {
346 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
347 	}
348 }
349 
350 
351 
352 void
353 idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
354 {
355 	idm_conn_event_t	new_event;
356 	iscsi_logout_rsp_hdr_t *logout_rsp =
357 	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;
358 
359 	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
360 		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
361 	} else {
362 		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
363 	}
364 
365 	if (rx) {
366 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
367 	} else {
368 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
369 	}
370 }
371 
372 /*
373  * idm_svc_conn_create()
374  * Transport-agnostic service connection creation, invoked from the transport
375  * layer.
376  */
377 idm_status_t
378 idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
379     idm_conn_t **ic_result)
380 {
381 	idm_conn_t	*ic;
382 	idm_status_t	rc;
383 
384 	mutex_enter(&is->is_mutex);
385 	if (!is->is_online) {
386 		mutex_exit(&is->is_mutex);
387 		return (IDM_STATUS_FAIL);
388 	}
389 	mutex_exit(&is->is_mutex);
390 
391 	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
392 	    &is->is_svc_req.sr_conn_ops);
393 	ic->ic_svc_binding = is;
394 
395 	/*
396 	 * Prepare connection state machine
397 	 */
398 	if ((rc = idm_conn_sm_init(ic)) != 0) {
399 		idm_conn_destroy_common(ic);
400 		return (rc);
401 	}
402 
403 
404 	*ic_result = ic;
405 
406 	mutex_enter(&idm.idm_global_mutex);
407 	list_insert_tail(&idm.idm_tgt_conn_list, ic);
408 	idm.idm_tgt_conn_count++;
409 	mutex_exit(&idm.idm_global_mutex);
410 
411 	return (IDM_STATUS_SUCCESS);
412 }
413 
414 void
415 idm_svc_conn_destroy(idm_conn_t *ic)
416 {
417 	mutex_enter(&idm.idm_global_mutex);
418 	list_remove(&idm.idm_tgt_conn_list, ic);
419 	idm.idm_tgt_conn_count--;
420 	mutex_exit(&idm.idm_global_mutex);
421 
422 	if (ic->ic_transport_private != NULL) {
423 		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
424 	}
425 	idm_conn_destroy_common(ic);
426 }
427 
428 /*
429  * idm_conn_create_common()
430  *
431  * Allocate and initialize IDM connection context
432  */
433 idm_conn_t *
434 idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
435     idm_conn_ops_t *conn_ops)
436 {
437 	idm_conn_t		*ic;
438 	idm_transport_t		*it;
439 	idm_transport_type_t	type;
440 
441 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
442 		it = &idm_transport_list[type];
443 
444 		if ((it->it_ops != NULL) && (it->it_type == tt))
445 			break;
446 	}
447 	ASSERT(it->it_type == tt);
448 	if (it->it_type != tt)
449 		return (NULL);
450 
451 	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);
452 
453 	/* Initialize data */
454 	ic->ic_target_name[0] = '\0';
455 	ic->ic_initiator_name[0] = '\0';
456 	ic->ic_isid[0] = '\0';
457 	ic->ic_tsih[0] = '\0';
458 	ic->ic_conn_type = conn_type;
459 	ic->ic_conn_ops = *conn_ops;
460 	ic->ic_transport_ops = it->it_ops;
461 	ic->ic_transport_type = tt;
462 	ic->ic_transport_private = NULL; /* Set by transport service */
463 	ic->ic_internal_cid = idm_cid_alloc();
464 	if (ic->ic_internal_cid == 0) {
465 		kmem_free(ic, sizeof (idm_conn_t));
466 		return (NULL);
467 	}
468 	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
469 	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
470 	idm_refcnt_init(&ic->ic_refcnt, ic);
471 
472 	return (ic);
473 }
474 
475 void
476 idm_conn_destroy_common(idm_conn_t *ic)
477 {
478 	idm_conn_sm_fini(ic);
479 	idm_refcnt_destroy(&ic->ic_refcnt);
480 	cv_destroy(&ic->ic_cv);
481 	mutex_destroy(&ic->ic_mutex);
482 	idm_cid_free(ic->ic_internal_cid);
483 
484 	kmem_free(ic, sizeof (idm_conn_t));
485 }
486 
487 /*
488  * Invoked from the SM as a result of client's invocation of
489  * idm_ini_conn_connect()
490  */
491 idm_status_t
492 idm_ini_conn_finish(idm_conn_t *ic)
493 {
494 	/* invoke the transport-specific connect routine */
495 	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
496 }
497 
498 idm_status_t
499 idm_tgt_conn_finish(idm_conn_t *ic)
500 {
501 	idm_status_t rc;
502 
503 	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
504 	if (rc != IDM_STATUS_SUCCESS) {
505 		return (IDM_STATUS_REJECT);
506 	}
507 
508 	/* Target client is ready to receive a login, start connection */
509 	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
510 }
511 
512 idm_transport_t *
513 idm_transport_lookup(idm_conn_req_t *cr)
514 {
515 	idm_transport_type_t	type;
516 	idm_transport_t		*it;
517 	idm_transport_caps_t	caps;
518 
519 	/*
520 	 * Make sure all available transports are setup.  We call this now
521 	 * instead of at initialization time in case IB has become available
522 	 * since we started (hotplug, etc.).
523 	 */
524 	idm_transport_setup(cr->cr_li);
525 
526 	/* Determine the transport for this connection */
527 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
528 		it = &idm_transport_list[type];
529 
530 		if (it->it_ops == NULL) {
531 			/* transport is not registered */
532 			continue;
533 		}
534 
535 		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
536 			return (it);
537 		}
538 	}
539 
540 	ASSERT(0);
541 	return (NULL); /* Make gcc happy */
542 }
543 
544 void
545 idm_transport_setup(ldi_ident_t li)
546 {
547 	idm_transport_type_t	type;
548 	idm_transport_t		*it;
549 	int			rc;
550 
551 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
552 		it = &idm_transport_list[type];
553 		/*
554 		 * We may want to store the LDI handle in the idm_svc_t
555 		 * and then allow multiple calls to ldi_open_by_name.  This
556 		 * would enable the LDI code to track who has the device open
557 		 * which could be useful in the case where we have multiple
558 		 * services and perhaps also have initiator and target opening
559 		 * the transport simultaneously.  For now we stick with a
560 		 * single open per transport.
561 		 */
562 		if (it->it_ops == NULL) {
563 			/* transport is not ready, try to initialize it */
564 			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
565 				idm_so_init(it);
566 			} else {
567 				rc = ldi_open_by_name(it->it_device_path,
568 				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
569 				/*
570 				 * If the open is successful we will have
571 				 * filled in the LDI handle in the transport
572 				 * table and we expect that the transport
573 				 * registered itself.
574 				 */
575 				if (rc != 0) {
576 					it->it_ldi_hdl = NULL;
577 				}
578 			}
579 		}
580 	}
581 }
582 
583 void
584 idm_transport_teardown()
585 {
586 	idm_transport_type_t	type;
587 	idm_transport_t		*it;
588 
589 	ASSERT(mutex_owned(&idm.idm_global_mutex));
590 
591 	/* Caller holds the IDM global mutex */
592 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
593 		it = &idm_transport_list[type];
594 		/* If we have an open LDI handle on this driver, close it */
595 		if (it->it_ldi_hdl != NULL) {
596 			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
597 			it->it_ldi_hdl = NULL;
598 		}
599 	}
600 }
601 
602 /*
603  * ID pool code.  We use this to generate unique structure identifiers without
604  * searching the existing structures.  This avoids the need to lock entire
605  * sets of structures at inopportune times.  Adapted from the CIFS server code.
606  *
607  *    A pool of IDs is a pool of 16-bit numbers. It is implemented as a bitmap.
608  *    A bit set to '1' indicates that the corresponding value has been allocated.
609  *    The allocation process is done by shifting a bit through the whole bitmap.
610  *    The current position of that index bit is kept in the idm_idpool_t
611  *    structure and represented by a byte index (0 to buffer size minus 1) and
612  *    a bit index (0 to 7).
613  *
614  *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
615  *    out of IDs its current size is doubled until it reaches its maximum size
616  *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
617  *    means that a pool can have a maximum number of 65534 IDs available.
618  */
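
/*
 *    Worked example (illustrative only): with this encoding an ID maps to
 *    bitmap position (byte index = id / 8, bit index = id % 8), so ID 11
 *    lives in id_pool[1] under mask (1 << 3).  idm_idpool_alloc() below
 *    reconstructs an ID the same way, as id_idx * 8 + bit_idx, and
 *    idm_idpool_free() clears exactly that bit.
 */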
619 
620 static int
621 idm_idpool_increment(
622     idm_idpool_t	*pool)
623 {
624 	uint8_t		*new_pool;
625 	uint32_t	new_size;
626 
627 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
628 
629 	new_size = pool->id_size * 2;
630 	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
631 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
632 		if (new_pool) {
633 			bzero(new_pool, new_size / 8);
634 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
635 			kmem_free(pool->id_pool, pool->id_size / 8);
636 			pool->id_pool = new_pool;
637 			pool->id_free_counter += new_size - pool->id_size;
638 			pool->id_max_free_counter += new_size - pool->id_size;
639 			pool->id_size = new_size;
640 			pool->id_idx_msk = (new_size / 8) - 1;
641 			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
642 				/* id 65535 (-1) made unavailable */
643 				pool->id_pool[pool->id_idx_msk] = 0x80;
644 				pool->id_free_counter--;
645 				pool->id_max_free_counter--;
646 			}
647 			return (0);
648 		}
649 	}
650 	return (-1);
651 }
652 
653 /*
654  * idm_idpool_create
655  *
656  * This function initializes the pool structure provided.
657  */
658 
659 int
660 idm_idpool_create(idm_idpool_t *pool)
661 {
662 
663 	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);
664 
665 	pool->id_size = IDM_IDPOOL_MIN_SIZE;
666 	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
667 	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
668 	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
669 	pool->id_bit = 0x02;
670 	pool->id_bit_idx = 1;
671 	pool->id_idx = 0;
672 	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
673 	    KM_SLEEP);
674 	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
676 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
677 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
678 	pool->id_magic = IDM_IDPOOL_MAGIC;
679 	return (0);
680 }
681 
682 /*
683  * idm_idpool_destroy
684  *
685  * This function tears down and frees the resources associated with the
686  * pool provided.
687  */
688 
689 void
690 idm_idpool_destroy(idm_idpool_t *pool)
691 {
692 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
693 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
694 	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
695 	mutex_destroy(&pool->id_mutex);
696 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
697 }
698 
699 /*
700  * idm_idpool_alloc
701  *
702  * This function allocates an ID from the pool provided.
703  */
704 int
705 idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
706 {
707 	uint32_t	i;
708 	uint8_t		bit;
709 	uint8_t		bit_idx;
710 	uint8_t		byte;
711 
712 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
713 
714 	mutex_enter(&pool->id_mutex);
715 	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
716 		mutex_exit(&pool->id_mutex);
717 		return (-1);
718 	}
719 
720 	i = pool->id_size;
721 	while (i) {
722 		bit = pool->id_bit;
723 		bit_idx = pool->id_bit_idx;
724 		byte = pool->id_pool[pool->id_idx];
725 		while (bit) {
726 			if (byte & bit) {
727 				bit = bit << 1;
728 				bit_idx++;
729 				continue;
730 			}
731 			pool->id_pool[pool->id_idx] |= bit;
732 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
733 			pool->id_free_counter--;
734 			pool->id_bit = bit;
735 			pool->id_bit_idx = bit_idx;
736 			mutex_exit(&pool->id_mutex);
737 			return (0);
738 		}
739 		pool->id_bit = 1;
740 		pool->id_bit_idx = 0;
741 		pool->id_idx++;
742 		pool->id_idx &= pool->id_idx_msk;
743 		--i;
744 	}
745 	/*
746 	 * This section of code shouldn't be reached. If there are IDs
747 	 * available and none could be found, there's a problem.
748 	 */
749 	ASSERT(0);
750 	mutex_exit(&pool->id_mutex);
751 	return (-1);
752 }
753 
754 /*
755  * idm_idpool_free
756  *
757  * This function frees the ID provided.
758  */
759 void
760 idm_idpool_free(idm_idpool_t *pool, uint16_t id)
761 {
762 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
763 	ASSERT(id != 0);
764 	ASSERT(id != 0xFFFF);
765 
766 	mutex_enter(&pool->id_mutex);
767 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
768 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
769 		pool->id_free_counter++;
770 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
771 		mutex_exit(&pool->id_mutex);
772 		return;
773 	}
774 	/* Freeing a free ID. */
775 	ASSERT(0);
776 	mutex_exit(&pool->id_mutex);
777 }
778 
779 uint32_t
780 idm_cid_alloc(void)
781 {
782 	/*
783 	 * ID pool works with 16-bit identifiers right now.  That should
784 	 * be plenty since we will probably never have more than 2^16
785 	 * connections simultaneously.
786 	 */
787 	uint16_t cid16;
788 
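	/*
	 * Note: the ID pool never hands out ID 0 (idm_idpool_create() marks
	 * it unavailable), which is why returning 0 below can safely
	 * indicate failure.
	 */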
789 	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
790 		return (0); /* Fail */
791 	}
792 
793 	return ((uint32_t)cid16);
794 }
795 
796 void
797 idm_cid_free(uint32_t cid)
798 {
799 	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
800 }
801 
802 
803 /*
804  * Code for generating the header and data digests
805  *
806  * This is the CRC-32C table
807  * Generated with:
808  * width = 32 bits
809  * poly = 0x1EDC6F41
810  * reflect input bytes = true
811  * reflect output bytes = true
812  */
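
/*
 * Illustrative sketch (not part of the driver): an equivalent table could
 * be generated at run time with the reflected polynomial 0x82F63B78 (the
 * bit-reversal of 0x1EDC6F41), roughly as follows:
 *
 *	for (n = 0; n < 256; n++) {
 *		uint32_t c = n;
 *		for (k = 0; k < 8; k++)
 *			c = (c & 1) ? (c >> 1) ^ 0x82F63B78 : (c >> 1);
 *		idm_crc32c_table[n] = c;
 *	}
 */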
813 
814 uint32_t idm_crc32c_table[256] =
815 {
816 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
817 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
818 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
819 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
820 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
821 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
822 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
823 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
824 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
825 	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
826 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
827 	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
828 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
829 	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
830 	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
831 	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
832 	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
833 	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
834 	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
835 	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
836 	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
837 	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
838 	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
839 	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
840 	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
841 	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
842 	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
843 	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
844 	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
845 	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
846 	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
847 	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
848 	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
849 	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
850 	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
851 	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
852 	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
853 	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
854 	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
855 	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
856 	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
857 	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
858 	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
859 	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
860 	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
861 	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
862 	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
863 	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
864 	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
865 	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
866 	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
867 	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
868 	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
869 	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
870 	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
871 	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
872 	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
873 	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
874 	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
875 	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
876 	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
877 	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
878 	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
879 	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
880 };
881 
882 /*
883  * idm_crc32c - Steps through the buffer one byte at a time, calculating
884  * the reflected CRC using the table.
885  */
886 uint32_t
887 idm_crc32c(void *address, unsigned long length)
888 {
889 	uint8_t *buffer = address;
890 	uint32_t crc = 0xffffffff, result;
891 #ifdef _BIG_ENDIAN
892 	uint8_t byte0, byte1, byte2, byte3;
893 #endif
894 
895 	ASSERT(address != NULL);
896 
897 	while (length--) {
898 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
899 		    (crc >> 8);
900 	}
901 	result = crc ^ 0xffffffff;
902 
903 #ifdef	_BIG_ENDIAN
904 	byte0 = (uint8_t)(result & 0xFF);
905 	byte1 = (uint8_t)((result >> 8) & 0xFF);
906 	byte2 = (uint8_t)((result >> 16) & 0xFF);
907 	byte3 = (uint8_t)((result >> 24) & 0xFF);
908 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
909 #endif	/* _BIG_ENDIAN */
910 
911 	return (result);
912 }
913 
914 
915 /*
916  * idm_crc32c_continued - Continues stepping through the buffer one
917  * byte at a time, calculating the reflected CRC using the table.
918  */
919 uint32_t
920 idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
921 {
922 	uint8_t *buffer = address;
923 	uint32_t result;
924 #ifdef	_BIG_ENDIAN
925 	uint8_t byte0, byte1, byte2, byte3;
926 #endif
927 
928 	ASSERT(address != NULL);
929 
930 #ifdef	_BIG_ENDIAN
931 	byte0 = (uint8_t)((crc >> 24) & 0xFF);
932 	byte1 = (uint8_t)((crc >> 16) & 0xFF);
933 	byte2 = (uint8_t)((crc >> 8) & 0xFF);
934 	byte3 = (uint8_t)(crc & 0xFF);
935 	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
936 #endif
937 
938 	crc = crc ^ 0xffffffff;
939 	while (length--) {
940 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
941 		    (crc >> 8);
942 	}
943 	result = crc ^ 0xffffffff;
944 
945 #ifdef	_BIG_ENDIAN
946 	byte0 = (uint8_t)(result & 0xFF);
947 	byte1 = (uint8_t)((result >> 8) & 0xFF);
948 	byte2 = (uint8_t)((result >> 16) & 0xFF);
949 	byte3 = (uint8_t)((result >> 24) & 0xFF);
950 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
951 #endif
952 	return (result);
953 }
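
/*
 * Note that the two routines above compose: for a buffer processed in two
 * pieces,
 *
 *	idm_crc32c_continued(buf + k, len - k, idm_crc32c(buf, k))
 *
 * yields the same digest as idm_crc32c(buf, len) on either endianness,
 * which is what lets callers checksum data that arrives in pieces.
 */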
954 
955 /* ARGSUSED */
956 int
957 idm_task_constructor(void *hdl, void *arg, int flags)
958 {
959 	idm_task_t *idt = (idm_task_t *)hdl;
960 	uint32_t next_task;
961 
962 	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);
963 
964 	/* Find the next free task ID */
965 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
966 	next_task = idm.idm_taskid_next;
967 	while (idm.idm_taskid_table[next_task]) {
968 		next_task++;
969 		if (next_task == idm.idm_taskid_max)
970 			next_task = 0;
971 		if (next_task == idm.idm_taskid_next) {
972 			rw_exit(&idm.idm_taskid_table_lock);
973 			return (-1);
974 		}
975 	}
976 
977 	idm.idm_taskid_table[next_task] = idt;
978 	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
979 	rw_exit(&idm.idm_taskid_table_lock);
980 
981 	idt->idt_tt = next_task;
982 
983 	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
984 	    offsetof(idm_buf_t, idb_buflink));
985 	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
986 	    offsetof(idm_buf_t, idb_buflink));
987 	idm_refcnt_init(&idt->idt_refcnt, idt);
988 
989 	/*
990 	 * Set the transport header pointer explicitly.  This removes the
991 	 * need for per-transport header allocation, which simplifies cache
992 	 * init considerably.  If at a later date we have an additional IDM
993 	 * transport that requires a different size, we'll revisit this.
994 	 */
995 	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
996 
997 	return (0);
998 }
999 
1000 /* ARGSUSED */
1001 void
1002 idm_task_destructor(void *hdl, void *arg)
1003 {
1004 	idm_task_t *idt = (idm_task_t *)hdl;
1005 
1006 	/* Remove the task from the ID table */
1007 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1008 	idm.idm_taskid_table[idt->idt_tt] = NULL;
1009 	rw_exit(&idm.idm_taskid_table_lock);
1010 
1011 	/* destroy the inbuf and outbuf lists */
1012 	idm_refcnt_destroy(&idt->idt_refcnt);
1013 	list_destroy(&idt->idt_inbufv);
1014 	list_destroy(&idt->idt_outbufv);
1015 
1016 	/*
1017 	 * The final call to idm_task_rele may happen with the task
1018 	 * mutex held which may invoke this destructor immediately.
1019 	 * Stall here until the task mutex owner lets go.
1020 	 */
1021 	mutex_enter(&idt->idt_mutex);
1022 	mutex_destroy(&idt->idt_mutex);
1023 }
1024 
1025 /*
1026  * idm_listbuf_insert searches from the back of the list looking for the
1027  * insertion point, keeping the buffers sorted by increasing idb_bufoffset.
1028  */
1029 void
1030 idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
1031 {
1032 	idm_buf_t	*idb;
1033 
1034 	/* iterate through the list to find the insertion point */
1035 	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {
1036 
1037 		if (idb->idb_bufoffset < buf->idb_bufoffset) {
1038 
1039 			list_insert_after(lst, idb, buf);
1040 			return;
1041 		}
1042 	}
1043 
1044 	/* add the buf to the head of the list */
1045 	list_insert_head(lst, buf);
1046 
1047 }
1048 
1049 /*ARGSUSED*/
1050 void
1051 idm_wd_thread(void *arg)
1052 {
1053 	idm_conn_t	*ic;
1054 	clock_t		wake_time;
1055 	clock_t		idle_time;
1056 
1057 	/* Record the thread id for thread_join() */
1058 	idm.idm_wd_thread_did = curthread->t_did;
1059 	mutex_enter(&idm.idm_global_mutex);
1060 	idm.idm_wd_thread_running = B_TRUE;
1061 	cv_signal(&idm.idm_wd_cv);
1062 
1063 	while (idm.idm_wd_thread_running) {
1064 		for (ic = list_head(&idm.idm_tgt_conn_list);
1065 		    ic != NULL;
1066 		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
1067 			idle_time = ddi_get_lbolt() - ic->ic_timestamp;
1068 
1069 			/*
1070 			 * If this connection is in FFP then grab a hold
1071 			 * and check the various timeout thresholds.  Otherwise
1072 			 * the connection is closing and we should just
1073 			 * move on to the next one.
1074 			 */
1075 			mutex_enter(&ic->ic_state_mutex);
1076 			if (ic->ic_ffp) {
1077 				idm_conn_hold(ic);
1078 			} else {
1079 				mutex_exit(&ic->ic_state_mutex);
1080 				continue;
1081 			}
1082 
1083 			/*
1084 			 * If there hasn't been any activity on this
1085 			 * connection for the keepalive timeout period
1086 			 * and if the client has provided a keepalive
1087 			 * callback then call the keepalive callback.
1088 			 * This allows the client to take action to keep
1089 			 * the link alive (like send a nop PDU).
1090 			 */
1091 			if ((TICK_TO_SEC(idle_time) >=
1092 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
1093 			    !ic->ic_keepalive) {
1094 				ic->ic_keepalive = B_TRUE;
1095 				if (ic->ic_conn_ops.icb_keepalive) {
1096 					mutex_exit(&ic->ic_state_mutex);
1097 					mutex_exit(&idm.idm_global_mutex);
1098 					(*ic->ic_conn_ops.icb_keepalive)(ic);
1099 					mutex_enter(&idm.idm_global_mutex);
1100 					mutex_enter(&ic->ic_state_mutex);
1101 				}
1102 			} else if ((TICK_TO_SEC(idle_time) <
1103 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
1104 				/* Reset keepalive */
1105 				ic->ic_keepalive = B_FALSE;
1106 			}
1107 
1108 			/*
1109 			 * If there hasn't been any activity on this
1110 			 * connection for the failure timeout period then
1111 			 * drop the connection.  We expect the initiator
1112 			 * to keep the connection alive if it wants the
1113 			 * connection to stay open.
1114 			 *
1115 			 * If it turns out to be desirable to take a
1116 			 * more active role in maintaining the connection
1117 			 * we could add a client callback to send
1118 			 * a "keepalive" kind of message (no doubt a nop)
1119 			 * and fire that on a shorter timer.
1120 			 */
1121 			if (TICK_TO_SEC(idle_time) >
1122 			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
1123 				mutex_exit(&ic->ic_state_mutex);
1124 				mutex_exit(&idm.idm_global_mutex);
1125 				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
1126 				    "conn %p idle for %d seconds, "
1127 				    "sending CE_TRANSPORT_FAIL",
1128 				    (void *)ic, (int)TICK_TO_SEC(idle_time));
1129 				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
1130 				mutex_enter(&idm.idm_global_mutex);
1131 				mutex_enter(&ic->ic_state_mutex);
1132 			}
1133 
1134 			idm_conn_rele(ic);
1135 
1136 			mutex_exit(&ic->ic_state_mutex);
1137 		}
1138 
1139 		wake_time = ddi_get_lbolt() + SEC_TO_TICK(IDM_WD_INTERVAL);
1140 		(void) cv_timedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
1141 		    wake_time);
1142 	}
1143 	mutex_exit(&idm.idm_global_mutex);
1144 
1145 	thread_exit();
1146 }
1147