xref: /titanic_50/usr/src/uts/common/io/idm/idm_impl.c (revision 5d0e1406420f52cc4d3d0543044034c4894b5865)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/cpuvar.h>
31 #include <sys/sdt.h>
32 
33 #include <sys/socket.h>
34 #include <sys/strsubr.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysmacros.h>
37 
38 #include <sys/idm/idm.h>
39 #include <sys/idm/idm_so.h>
40 #include <hd_crc.h>
41 
42 extern idm_transport_t  idm_transport_list[];
43 /*
44  * iscsi_crc32_hd: -1 - uninitialized
45  * 0  - hardware CRC32 instructions are available (use HW_CRC32)
46  * others - no hardware support, use the software table
47  */
48 static int iscsi_crc32_hd = -1;
49 
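/*
 * idm_pdu_rx()
 *
 * Main receive path for iSCSI PDUs.  When the connection is in
 * full-feature phase, SCSI-related PDUs are forwarded directly to the
 * client/transport handlers; everything else is translated into a
 * connection state machine event with the PDU as event data.
 */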
50 void
51 idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
52 {
53 	iscsi_async_evt_hdr_t *async_evt;
54 
55 	/*
56 	 * If we are in full-featured mode then route SCSI-related
57 	 * commands to the appropriate function vector
58 	 */
59 	ic->ic_timestamp = ddi_get_lbolt();
60 	mutex_enter(&ic->ic_state_mutex);
61 	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
62 		mutex_exit(&ic->ic_state_mutex);
63 
64 		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
65 			/* Forwarded SCSI-related commands */
66 			return;
67 		}
68 		mutex_enter(&ic->ic_state_mutex);
69 	}
70 
71 	/*
72 	 * If we get here with a SCSI-related PDU then we are not in
73 	 * full-feature mode and the PDU is a protocol error (SCSI command
74 	 * PDU's may sometimes be an exception, see below).  All
75  * non-SCSI PDU's are treated the same regardless of whether
76 	 * we are in full-feature mode.
77 	 *
78 	 * Look at the opcode and in some cases the PDU status and
79 	 * determine the appropriate event to send to the connection
80 	 * state machine.  Generate the event, passing the PDU as data.
81 	 * If the current connection state allows reception of the event
82 	 * the PDU will be submitted to the IDM client for processing,
83 	 * otherwise the PDU will be dropped.
84 	 */
85 	switch (IDM_PDU_OPCODE(pdu)) {
86 	case ISCSI_OP_LOGIN_CMD:
87 		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
88 		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
89 		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
90 		break;
91 	case ISCSI_OP_LOGIN_RSP:
92 		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
93 		break;
94 	case ISCSI_OP_LOGOUT_CMD:
95 		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
96 		    iscsi_logout_hdr_t *,
97 		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
98 		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
99 		break;
100 	case ISCSI_OP_LOGOUT_RSP:
101 		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
102 		break;
103 	case ISCSI_OP_ASYNC_EVENT:
104 		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
105 		switch (async_evt->async_event) {
106 		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
107 			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
108 			    (uintptr_t)pdu);
109 			break;
110 		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
111 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
112 			    (uintptr_t)pdu);
113 			break;
114 		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
115 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
116 			    (uintptr_t)pdu);
117 			break;
118 		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
119 		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
120 		default:
121 			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
122 			    (uintptr_t)pdu);
123 			break;
124 		}
125 		break;
126 	case ISCSI_OP_SCSI_CMD:
127 		/*
128 		 * Consider this scenario:  We are a target connection
129 		 * in "in login" state and a "login success sent" event has
130 		 * been generated but not yet handled.  Since we've sent
131 		 * the login response but we haven't actually transitioned
132 		 * to FFP mode we might conceivably receive a SCSI command
133 		 * from the initiator before we are ready.  We are actually
134  * in FFP, we just don't know it yet -- to address this we
135 		 * can generate an event corresponding to the SCSI command.
136 		 * At the point when the event is handled by the state
137 		 * machine the login request will have been handled and we
138 		 * should be in FFP.  If we are not in FFP by that time
139 		 * we can reject the SCSI command with a protocol error.
140 		 *
141 		 * This scenario only applies to the target.
142 		 *
143 		 * Handle dtrace probe in iscsit so we can find all the
144 		 * pieces of the CDB
145 		 */
146 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
147 		break;
148 	case ISCSI_OP_SCSI_DATA:
149 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
150 		    iscsi_data_hdr_t *,
151 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
152 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
153 		break;
154 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
155 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
156 		    iscsi_scsi_task_mgt_hdr_t *,
157 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
158 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
159 		break;
160 	case ISCSI_OP_NOOP_OUT:
161 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
162 		    iscsi_nop_out_hdr_t *,
163 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
164 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
165 		break;
166 	case ISCSI_OP_TEXT_CMD:
167 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
168 		    iscsi_text_hdr_t *,
169 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
170 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
171 		break;
172 	/* Initiator PDU's */
173 	case ISCSI_OP_SCSI_DATA_RSP:
174 	case ISCSI_OP_RTT_RSP:
175 	case ISCSI_OP_SNACK_CMD:
176 	case ISCSI_OP_NOOP_IN:
177 	case ISCSI_OP_TEXT_RSP:
178 	case ISCSI_OP_REJECT_MSG:
179 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
180 		/* Validate received PDU against current state */
181 		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
182 		    (uintptr_t)pdu);
183 		break;
184 	}
185 	mutex_exit(&ic->ic_state_mutex);
186 }
187 
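/*
 * idm_pdu_tx_forward()
 *
 * Hand a PDU to the transport layer for transmission on this connection.
 */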
188 void
189 idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
190 {
191 	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
192 }
193 
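/*
 * idm_pdu_rx_forward_ffp()
 *
 * Dispatch a full-feature-phase PDU directly to the matching client or
 * transport callback.  Returns B_TRUE if the PDU was handled here,
 * B_FALSE if it is not an FFP opcode.
 */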
194 boolean_t
195 idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
196 {
197 	/*
198 	 * If this is an FFP request, call the appropriate handler
199 	 * and return B_TRUE, otherwise return B_FALSE.
200 	 */
201 	switch (IDM_PDU_OPCODE(pdu)) {
202 	case ISCSI_OP_SCSI_CMD:
203 		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
204 		return (B_TRUE);
205 	case ISCSI_OP_SCSI_DATA:
206 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
207 		    iscsi_data_hdr_t *,
208 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
209 		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
210 		return (B_TRUE);
211 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
212 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
213 		    iscsi_scsi_task_mgt_hdr_t *,
214 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
215 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
216 		return (B_TRUE);
217 	case ISCSI_OP_NOOP_OUT:
218 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
219 		    iscsi_nop_out_hdr_t *,
220 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
221 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
222 		return (B_TRUE);
223 	case ISCSI_OP_TEXT_CMD:
224 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
225 		    iscsi_text_hdr_t *,
226 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
227 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
228 		return (B_TRUE);
229 		/* Initiator only */
230 	case ISCSI_OP_SCSI_RSP:
231 		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
232 		return (B_TRUE);
233 	case ISCSI_OP_SCSI_DATA_RSP:
234 		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
235 		return (B_TRUE);
236 	case ISCSI_OP_RTT_RSP:
237 		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
238 		return (B_TRUE);
239 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
240 	case ISCSI_OP_TEXT_RSP:
241 	case ISCSI_OP_NOOP_IN:
242 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
243 		return (B_TRUE);
244 	default:
245 		return (B_FALSE);
246 	}
247 	/*NOTREACHED*/
248 }
249 
250 void
251 idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
252 {
253 	/*
254 	 * Some PDU's specific to FFP get special handling.  This function
255 	 * will normally never be called in FFP with an FFP PDU since this
256  * is a slow path, but it can happen on the target side during
257 	 * the transition to FFP.  We primarily call
258 	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
259 	 */
260 	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
261 		/*
262 		 * Non-FFP PDU, use generic RX handler
263 		 */
264 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
265 	}
266 }
267 
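/*
 * idm_parse_login_rsp()
 *
 * Inspect a login response PDU and generate the corresponding
 * connection state machine event (login success, login failure, or a
 * generic PDU event for intermediate login exchanges).
 */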
268 void
269 idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
270 {
271 	iscsi_login_rsp_hdr_t	*login_rsp =
272 	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
273 	idm_conn_event_t	new_event;
274 
275 	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
276 		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
277 		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
278 		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
279 		    ISCSI_FULL_FEATURE_PHASE)) {
280 			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
281 			    CE_LOGIN_SUCCESS_SND);
282 		} else {
283 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
284 		}
285 	} else {
286 		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
287 	}
288 
289 	if (rx) {
290 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
291 	} else {
292 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
293 	}
294 }
295 
296 
297 void
298 idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
299 {
300 	iscsi_logout_hdr_t 	*logout_req =
301 	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
302 	idm_conn_event_t	new_event;
303 	uint8_t			reason =
304 	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);
305 
306 	/*
307 	 *	For a normal logout (close connection or close session), IDM
308 	 *	will terminate processing of all tasks, completing the tasks
309 	 *	back to the client with a status indicating the connection
310 	 *	was logged out.  These tasks do not get completed.
311 	 *
312 	 *	For a "close connection for recovery" logout, IDM suspends
313 	 *	processing of all tasks and completes them back to the client
314 	 *	with a status indicating connection was logged out for
315 	 *	recovery.  Both initiator and target hang onto these tasks.
316 	 *	When we add ERL2 support IDM will need to provide mechanisms
317 	 *	to change the task and buffer associations to a new connection.
318 	 *
319 	 *	This code doesn't address the possibility of MC/S.  We'll
320 	 *	need to decide how the separate connections get handled
321 	 *	in that case.  One simple option is to make the client
322 	 *	generate the events for the other connections.
323 	 */
324 	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
325 		new_event =
326 		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
327 	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
328 	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
329 		/* Check logout CID against this connection's CID */
330 		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
331 			/* Logout is for this connection */
332 			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
333 			    CE_LOGOUT_THIS_CONN_SND);
334 		} else {
335 			/*
336 			 * Logout affects another connection.  This is not
337 			 * a relevant event for this connection so we'll
338 			 * just treat it as a normal PDU event.  Client
339 			 * will need to lookup the other connection and
340 			 * generate the event.
341 			 */
342 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
343 		}
344 	} else {
345 		/* Invalid reason code */
346 		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
347 	}
348 
349 	if (rx) {
350 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
351 	} else {
352 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
353 	}
354 }
355 
356 
357 
358 void
359 idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
360 {
361 	idm_conn_event_t	new_event;
362 	iscsi_logout_rsp_hdr_t *logout_rsp =
363 	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;
364 
365 	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
366 		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
367 	} else {
368 		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
369 	}
370 
371 	if (rx) {
372 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
373 	} else {
374 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
375 	}
376 }
377 
378 /*
379  * idm_svc_conn_create()
380  * Transport-agnostic service connection creation, invoked from the transport
381  * layer.
382  */
383 idm_status_t
384 idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
385     idm_conn_t **ic_result)
386 {
387 	idm_conn_t	*ic;
388 	idm_status_t	rc;
389 
390 	/*
391 	 * Skip some work if we can already tell we are going offline.
392 	 * Otherwise we will destroy this connection later as part of
393 	 * shutting down the svc.
394 	 */
395 	mutex_enter(&is->is_mutex);
396 	if (!is->is_online) {
397 		mutex_exit(&is->is_mutex);
398 		return (IDM_STATUS_FAIL);
399 	}
400 	mutex_exit(&is->is_mutex);
401 
402 	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
403 	    &is->is_svc_req.sr_conn_ops);
404 	if (ic == NULL) {
405 		return (IDM_STATUS_FAIL);
406 	}
407 	ic->ic_svc_binding = is;
408 
409 	/*
410 	 * Prepare connection state machine
411 	 */
412 	if ((rc = idm_conn_sm_init(ic)) != 0) {
413 		idm_conn_destroy_common(ic);
414 		return (rc);
415 	}
416 
417 
418 	*ic_result = ic;
419 
420 	mutex_enter(&idm.idm_global_mutex);
421 	list_insert_tail(&idm.idm_tgt_conn_list, ic);
422 	idm.idm_tgt_conn_count++;
423 	mutex_exit(&idm.idm_global_mutex);
424 
425 	return (IDM_STATUS_SUCCESS);
426 }
427 
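/*
 * idm_svc_conn_destroy()
 *
 * Tear down a target connection created by idm_svc_conn_create(),
 * including any transport-private state.
 */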
428 void
429 idm_svc_conn_destroy(idm_conn_t *ic)
430 {
431 	mutex_enter(&idm.idm_global_mutex);
432 	list_remove(&idm.idm_tgt_conn_list, ic);
433 	idm.idm_tgt_conn_count--;
434 	mutex_exit(&idm.idm_global_mutex);
435 
436 	if (ic->ic_transport_private != NULL) {
437 		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
438 	}
439 	idm_conn_destroy_common(ic);
440 }
441 
442 /*
443  * idm_conn_create_common()
444  *
445  * Allocate and initialize IDM connection context
446  */
447 idm_conn_t *
448 idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
449     idm_conn_ops_t *conn_ops)
450 {
451 	idm_conn_t		*ic;
452 	idm_transport_t		*it;
453 	idm_transport_type_t	type;
454 
455 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
456 		it = &idm_transport_list[type];
457 
458 		if ((it->it_ops != NULL) && (it->it_type == tt))
459 			break;
460 	}
461 	ASSERT(it->it_type == tt);
462 	if (it->it_type != tt)
463 		return (NULL);
464 
465 	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);
466 
467 	/* Initialize data */
468 	ic->ic_target_name[0] = '\0';
469 	ic->ic_initiator_name[0] = '\0';
470 	ic->ic_isid[0] = '\0';
471 	ic->ic_tsih[0] = '\0';
472 	ic->ic_conn_type = conn_type;
473 	ic->ic_conn_ops = *conn_ops;
474 	ic->ic_transport_ops = it->it_ops;
475 	ic->ic_transport_type = tt;
476 	ic->ic_transport_private = NULL; /* Set by transport service */
477 	ic->ic_internal_cid = idm_cid_alloc();
478 	if (ic->ic_internal_cid == 0) {
479 		kmem_free(ic, sizeof (idm_conn_t));
480 		return (NULL);
481 	}
482 	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
483 	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
484 	idm_refcnt_init(&ic->ic_refcnt, ic);
485 
486 	return (ic);
487 }
488 
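/*
 * idm_conn_destroy_common()
 *
 * Release the resources allocated by idm_conn_create_common().
 */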
489 void
490 idm_conn_destroy_common(idm_conn_t *ic)
491 {
492 	idm_conn_sm_fini(ic);
493 	idm_refcnt_destroy(&ic->ic_refcnt);
494 	cv_destroy(&ic->ic_cv);
495 	mutex_destroy(&ic->ic_mutex);
496 	idm_cid_free(ic->ic_internal_cid);
497 
498 	kmem_free(ic, sizeof (idm_conn_t));
499 }
500 
501 /*
502  * Invoked from the SM as a result of client's invocation of
503  * idm_ini_conn_connect()
504  */
505 idm_status_t
506 idm_ini_conn_finish(idm_conn_t *ic)
507 {
508 	/* invoke the transport-specific connect routine */
509 	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
510 }
511 
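/*
 * Invoked from the SM for an incoming target connection.  Ask the
 * client to accept the connection, then start the transport-specific
 * connection setup.
 */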
512 idm_status_t
513 idm_tgt_conn_finish(idm_conn_t *ic)
514 {
515 	idm_status_t rc;
516 
517 	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
518 	if (rc != IDM_STATUS_SUCCESS) {
519 		return (IDM_STATUS_REJECT);
520 	}
521 
522 	/* Target client is ready to receive a login, start connection */
523 	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
524 }
525 
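/*
 * idm_transport_lookup()
 *
 * Return the registered transport capable of handling this connection
 * request.  At least the sockets transport is expected to be capable,
 * so failing to find one is treated as a programming error.
 */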
526 idm_transport_t *
527 idm_transport_lookup(idm_conn_req_t *cr)
528 {
529 	idm_transport_type_t	type;
530 	idm_transport_t		*it;
531 	idm_transport_caps_t	caps;
532 
533 	/*
534 	 * Make sure all available transports are setup.  We call this now
535 	 * instead of at initialization time in case IB has become available
536 	 * since we started (hotplug, etc).
537 	 */
538 	idm_transport_setup(cr->cr_li, cr->cr_boot_conn);
539 
540 	/* Determine the transport for this connection */
541 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
542 		it = &idm_transport_list[type];
543 
544 		if (it->it_ops == NULL) {
545 			/* transport is not registered */
546 			continue;
547 		}
548 
549 		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
550 			return (it);
551 		}
552 	}
553 
554 	ASSERT(0);
555 	return (NULL); /* Make gcc happy */
556 }
557 
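/*
 * idm_transport_setup()
 *
 * Initialize any transports that are not yet registered.  The sockets
 * transport is initialized directly; other transports (iSER) are
 * initialized as a side effect of opening their driver via LDI.
 */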
558 void
559 idm_transport_setup(ldi_ident_t li, boolean_t boot_conn)
560 {
561 	idm_transport_type_t	type;
562 	idm_transport_t		*it;
563 	int			rc;
564 
565 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
566 		it = &idm_transport_list[type];
567 		/*
568 		 * We may want to store the LDI handle in the idm_svc_t
569 		 * and then allow multiple calls to ldi_open_by_name.  This
570 		 * would enable the LDI code to track who has the device open
571 		 * which could be useful in the case where we have multiple
572 		 * services and perhaps also have initiator and target opening
573 		 * the transport simultaneously.  For now we stick with the
574 		 * plan.
575 		 */
576 		if (it->it_ops == NULL) {
577 			/* transport is not ready, try to initialize it */
578 			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
579 				idm_so_init(it);
580 			} else {
581 				if (boot_conn == B_TRUE) {
582 					/*
583 					 * iSCSI boot doesn't need iSER.
584 					 * Opening iSER here may drive I/O to
585 					 * a failed session and cause a
586 					 * deadlock.
587 					 */
588 					continue;
589 				}
590 				rc = ldi_open_by_name(it->it_device_path,
591 				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
592 				/*
593 				 * If the open is successful we will have
594 				 * filled in the LDI handle in the transport
595 				 * table and we expect that the transport
596 				 * registered itself.
597 				 */
598 				if (rc != 0) {
599 					it->it_ldi_hdl = NULL;
600 				}
601 			}
602 		}
603 	}
604 }
605 
606 void
607 idm_transport_teardown()
608 {
609 	idm_transport_type_t	type;
610 	idm_transport_t		*it;
611 
612 	ASSERT(mutex_owned(&idm.idm_global_mutex));
613 
614 	/* Caller holds the IDM global mutex */
615 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
616 		it = &idm_transport_list[type];
617 		/* If we have an open LDI handle on this driver, close it */
618 		if (it->it_ldi_hdl != NULL) {
619 			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
620 			it->it_ldi_hdl = NULL;
621 		}
622 	}
623 }
624 
625 /*
626  * ID pool code.  We use this to generate unique structure identifiers without
627  * searching the existing structures.  This avoids the need to lock entire
628  * sets of structures at inopportune times.  Adapted from the CIFS server code.
629  *
630  *    A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
631  *    A bit set to '1' indicates that that particular value has been allocated.
632  *    The allocation process is done by shifting a bit through the bitmap.
633  *    The current position of that index bit is kept in the idm_idpool_t
634  *    structure and represented by a byte index (0 to buffer size minus 1) and
635  *    a bit index (0 to 7).
636  *
637  *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
638  *    out of IDs its current size is doubled until it reaches its maximum size
639  *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
640  *    means that a pool can have a maximum number of 65534 IDs available.
641  */
642 
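/*
 * idm_idpool_increment()
 *
 * Double the bitmap backing the pool, up to IDM_IDPOOL_MAX_SIZE IDs.
 * Returns 0 on success and -1 if the pool is already at its maximum
 * size or the allocation fails.  An allocated ID is simply
 * byte_index * 8 + bit_index; e.g. byte 3, bit 5 corresponds to ID 29.
 */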
643 static int
644 idm_idpool_increment(
645     idm_idpool_t	*pool)
646 {
647 	uint8_t		*new_pool;
648 	uint32_t	new_size;
649 
650 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
651 
652 	new_size = pool->id_size * 2;
653 	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
654 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
655 		if (new_pool) {
656 			bzero(new_pool, new_size / 8);
657 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
658 			kmem_free(pool->id_pool, pool->id_size / 8);
659 			pool->id_pool = new_pool;
660 			pool->id_free_counter += new_size - pool->id_size;
661 			pool->id_max_free_counter += new_size - pool->id_size;
662 			pool->id_size = new_size;
663 			pool->id_idx_msk = (new_size / 8) - 1;
664 			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
665 				/* id 65535 (-1) made unavailable */
666 				pool->id_pool[pool->id_idx_msk] = 0x80;
667 				pool->id_free_counter--;
668 				pool->id_max_free_counter--;
669 			}
670 			return (0);
671 		}
672 	}
673 	return (-1);
674 }
675 
676 /*
677  * idm_idpool_constructor
678  *
679  * This function initializes the pool structure provided.
680  */
681 
682 int
683 idm_idpool_create(idm_idpool_t *pool)
684 {
685 
686 	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);
687 
688 	pool->id_size = IDM_IDPOOL_MIN_SIZE;
689 	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
690 	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
691 	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
692 	pool->id_bit = 0x02;
693 	pool->id_bit_idx = 1;
694 	pool->id_idx = 0;
695 	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
696 	    KM_SLEEP);
697 	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
698 	/* id 65535 (-1) is reserved when the pool reaches its maximum size */
699 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
700 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
701 	pool->id_magic = IDM_IDPOOL_MAGIC;
702 	return (0);
703 }
704 
705 /*
706  * idm_idpool_destructor
707  *
708  * This function tears down and frees the resources associated with the
709  * pool provided.
710  */
711 
712 void
713 idm_idpool_destroy(idm_idpool_t *pool)
714 {
715 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
716 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
717 	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
718 	mutex_destroy(&pool->id_mutex);
719 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
720 }
721 
722 /*
723  * idm_idpool_alloc
724  *
725  * This function allocates an ID from the pool provided.
726  */
727 int
728 idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
729 {
730 	uint32_t	i;
731 	uint8_t		bit;
732 	uint8_t		bit_idx;
733 	uint8_t		byte;
734 
735 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
736 
737 	mutex_enter(&pool->id_mutex);
738 	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
739 		mutex_exit(&pool->id_mutex);
740 		return (-1);
741 	}
742 
743 	i = pool->id_size;
744 	while (i) {
745 		bit = pool->id_bit;
746 		bit_idx = pool->id_bit_idx;
747 		byte = pool->id_pool[pool->id_idx];
748 		while (bit) {
749 			if (byte & bit) {
750 				bit = bit << 1;
751 				bit_idx++;
752 				continue;
753 			}
754 			pool->id_pool[pool->id_idx] |= bit;
755 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
756 			pool->id_free_counter--;
757 			pool->id_bit = bit;
758 			pool->id_bit_idx = bit_idx;
759 			mutex_exit(&pool->id_mutex);
760 			return (0);
761 		}
762 		pool->id_bit = 1;
763 		pool->id_bit_idx = 0;
764 		pool->id_idx++;
765 		pool->id_idx &= pool->id_idx_msk;
766 		--i;
767 	}
768 	/*
769 	 * This section of code shouldn't be reached. If there are IDs
770 	 * available and none could be found, there's a problem.
771 	 */
772 	ASSERT(0);
773 	mutex_exit(&pool->id_mutex);
774 	return (-1);
775 }
776 
777 /*
778  * idm_idpool_free
779  *
780  * This function frees the ID provided.
781  */
782 void
783 idm_idpool_free(idm_idpool_t *pool, uint16_t id)
784 {
785 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
786 	ASSERT(id != 0);
787 	ASSERT(id != 0xFFFF);
788 
789 	mutex_enter(&pool->id_mutex);
790 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
791 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
792 		pool->id_free_counter++;
793 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
794 		mutex_exit(&pool->id_mutex);
795 		return;
796 	}
797 	/* Freeing a free ID. */
798 	ASSERT(0);
799 	mutex_exit(&pool->id_mutex);
800 }
801 
802 uint32_t
803 idm_cid_alloc(void)
804 {
805 	/*
806 	 * ID pool works with 16-bit identifiers right now.  That should
807 	 * be plenty since we will probably never have more than 2^16
808 	 * connections simultaneously.
809 	 */
810 	uint16_t cid16;
811 
812 	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
813 		return (0); /* Fail */
814 	}
815 
816 	return ((uint32_t)cid16);
817 }
818 
819 void
820 idm_cid_free(uint32_t cid)
821 {
822 	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
823 }
824 
825 
826 /*
827  * Code for generating the header and data digests
828  *
829  * This is the CRC-32C table
830  * Generated with:
831  * width = 32 bits
832  * poly = 0x1EDC6F41
833  * reflect input bytes = true
834  * reflect output bytes = true
835  */
836 
837 uint32_t idm_crc32c_table[256] =
838 {
839 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
840 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
841 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
842 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
843 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
844 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
845 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
846 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
847 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
848 	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
849 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
850 	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
851 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
852 	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
853 	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
854 	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
855 	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
856 	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
857 	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
858 	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
859 	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
860 	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
861 	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
862 	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
863 	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
864 	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
865 	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
866 	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
867 	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
868 	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
869 	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
870 	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
871 	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
872 	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
873 	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
874 	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
875 	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
876 	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
877 	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
878 	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
879 	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
880 	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
881 	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
882 	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
883 	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
884 	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
885 	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
886 	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
887 	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
888 	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
889 	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
890 	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
891 	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
892 	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
893 	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
894 	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
895 	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
896 	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
897 	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
898 	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
899 	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
900 	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
901 	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
902 	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
903 };
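/*
 * Note: the standard CRC-32C check value for the ASCII bytes
 * "123456789" is 0xE3069283; on a little-endian system idm_crc32c()
 * over that buffer returns this value.
 */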
904 
905 /*
906  * idm_crc32c - Steps through the buffer one byte at a time, calculating the
907  * reflected CRC using the table (or hardware CRC32 when available).
908  */
909 uint32_t
910 idm_crc32c(void *address, unsigned long length)
911 {
912 	uint8_t *buffer = address;
913 	uint32_t crc = 0xffffffff, result;
914 #ifdef _BIG_ENDIAN
915 	uint8_t byte0, byte1, byte2, byte3;
916 #endif
917 
918 	ASSERT(address != NULL);
919 
920 	if (iscsi_crc32_hd == -1) {
921 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
922 			iscsi_crc32_hd = 0;
923 		} else {
924 			iscsi_crc32_hd = 1;
925 		}
926 	}
927 	if (iscsi_crc32_hd == 0)
928 		return (HW_CRC32(buffer, length, crc));
929 
930 	while (length--) {
931 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
932 		    (crc >> 8);
933 	}
934 	result = crc ^ 0xffffffff;
935 
936 #ifdef	_BIG_ENDIAN
937 	byte0 = (uint8_t)(result & 0xFF);
938 	byte1 = (uint8_t)((result >> 8) & 0xFF);
939 	byte2 = (uint8_t)((result >> 16) & 0xFF);
940 	byte3 = (uint8_t)((result >> 24) & 0xFF);
941 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
942 #endif	/* _BIG_ENDIAN */
943 
944 	return (result);
945 }
946 
947 
948 /*
949  * idm_crc32c_continued - Continues stepping through buffer one
950  * byte at a time, calculating the reflected CRC using the table.
951  */
952 uint32_t
953 idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
954 {
955 	uint8_t *buffer = address;
956 	uint32_t result;
957 #ifdef	_BIG_ENDIAN
958 	uint8_t byte0, byte1, byte2, byte3;
959 #endif
960 
961 	ASSERT(address != NULL);
962 
963 	if (iscsi_crc32_hd == -1) {
964 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
965 			iscsi_crc32_hd = 0;
966 		} else {
967 			iscsi_crc32_hd = 1;
968 		}
969 	}
970 	if (iscsi_crc32_hd == 0)
971 		return (HW_CRC32_CONT(buffer, length, crc));
972 
973 
974 #ifdef	_BIG_ENDIAN
975 	byte0 = (uint8_t)((crc >> 24) & 0xFF);
976 	byte1 = (uint8_t)((crc >> 16) & 0xFF);
977 	byte2 = (uint8_t)((crc >> 8) & 0xFF);
978 	byte3 = (uint8_t)(crc & 0xFF);
979 	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
980 #endif
981 
982 	crc = crc ^ 0xffffffff;
983 	while (length--) {
984 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
985 		    (crc >> 8);
986 	}
987 	result = crc ^ 0xffffffff;
988 
989 #ifdef	_BIG_ENDIAN
990 	byte0 = (uint8_t)(result & 0xFF);
991 	byte1 = (uint8_t)((result >> 8) & 0xFF);
992 	byte2 = (uint8_t)((result >> 16) & 0xFF);
993 	byte3 = (uint8_t)((result >> 24) & 0xFF);
994 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
995 #endif
996 	return (result);
997 }
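/*
 * Illustrative usage: a digest over data split across two buffers can
 * be computed incrementally, e.g.
 *
 *	crc = idm_crc32c(buf0, len0);
 *	crc = idm_crc32c_continued(buf1, len1, crc);
 *
 * which matches idm_crc32c() computed over the concatenated data.
 */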
998 
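/*
 * idm_task_constructor()
 *
 * kmem cache constructor for IDM tasks: claims a free slot in the
 * global task ID table (stored in idt_tt) and initializes the task
 * mutex, buffer lists and reference count.
 */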
999 /* ARGSUSED */
1000 int
1001 idm_task_constructor(void *hdl, void *arg, int flags)
1002 {
1003 	idm_task_t *idt = (idm_task_t *)hdl;
1004 	uint32_t next_task;
1005 
1006 	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);
1007 
1008 	/* Find the next free task ID */
1009 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1010 	next_task = idm.idm_taskid_next;
1011 	while (idm.idm_taskid_table[next_task]) {
1012 		next_task++;
1013 		if (next_task == idm.idm_taskid_max)
1014 			next_task = 0;
1015 		if (next_task == idm.idm_taskid_next) {
1016 			rw_exit(&idm.idm_taskid_table_lock);
1017 			return (-1);
1018 		}
1019 	}
1020 
1021 	idm.idm_taskid_table[next_task] = idt;
1022 	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
1023 	rw_exit(&idm.idm_taskid_table_lock);
1024 
1025 	idt->idt_tt = next_task;
1026 
1027 	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
1028 	    offsetof(idm_buf_t, idb_buflink));
1029 	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
1030 	    offsetof(idm_buf_t, idb_buflink));
1031 	idm_refcnt_init(&idt->idt_refcnt, idt);
1032 
1033 	/*
1034 	 * Set the transport header pointer explicitly.  This removes the
1035 	 * need for per-transport header allocation, which simplifies cache
1036 	 * init considerably.  If at a later date we have an additional IDM
1037 	 * transport that requires a different size, we'll revisit this.
1038 	 */
1039 	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
1040 	idt->idt_flags = 0;
1041 	return (0);
1042 }
1043 
1044 /* ARGSUSED */
1045 void
1046 idm_task_destructor(void *hdl, void *arg)
1047 {
1048 	idm_task_t *idt = (idm_task_t *)hdl;
1049 
1050 	/* Remove the task from the ID table */
1051 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1052 	idm.idm_taskid_table[idt->idt_tt] = NULL;
1053 	rw_exit(&idm.idm_taskid_table_lock);
1054 
1055 	/* free the inbuf and outbuf */
1056 	idm_refcnt_destroy(&idt->idt_refcnt);
1057 	list_destroy(&idt->idt_inbufv);
1058 	list_destroy(&idt->idt_outbufv);
1059 
1060 	/*
1061 	 * The final call to idm_task_rele may happen with the task
1062 	 * mutex held which may invoke this destructor immediately.
1063 	 * Stall here until the task mutex owner lets go.
1064 	 */
1065 	mutex_enter(&idt->idt_mutex);
1066 	mutex_destroy(&idt->idt_mutex);
1067 }
1068 
1069 /*
1070  * idm_listbuf_insert searches from the back of the list looking for the
1071  * insertion point.
1072  */
1073 void
1074 idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
1075 {
1076 	idm_buf_t	*idb;
1077 
1078 	/* iterate through the list to find the insertion point */
1079 	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {
1080 
1081 		if (idb->idb_bufoffset < buf->idb_bufoffset) {
1082 
1083 			list_insert_after(lst, idb, buf);
1084 			return;
1085 		}
1086 	}
1087 
1088 	/* add the buf to the head of the list */
1089 	list_insert_head(lst, buf);
1090 
1091 }
1092 
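/*
 * idm_wd_thread()
 *
 * Connection watchdog.  Periodically scans the target connection list;
 * after the keepalive idle timeout it invokes the client's keepalive
 * callback (if any), and after the failure idle timeout it generates a
 * CE_TRANSPORT_FAIL event to drop the connection.
 */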
1093 /*ARGSUSED*/
1094 void
1095 idm_wd_thread(void *arg)
1096 {
1097 	idm_conn_t	*ic;
1098 	clock_t		wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
1099 	clock_t		idle_time;
1100 
1101 	/* Record the thread id for thread_join() */
1102 	idm.idm_wd_thread_did = curthread->t_did;
1103 	mutex_enter(&idm.idm_global_mutex);
1104 	idm.idm_wd_thread_running = B_TRUE;
1105 	cv_signal(&idm.idm_wd_cv);
1106 
1107 	while (idm.idm_wd_thread_running) {
1108 		for (ic = list_head(&idm.idm_tgt_conn_list);
1109 		    ic != NULL;
1110 		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
1111 			idle_time = ddi_get_lbolt() - ic->ic_timestamp;
1112 
1113 			/*
1114 			 * If this connection is in FFP then grab a hold
1115 			 * and check the various timeout thresholds.  Otherwise
1116 			 * the connection is closing and we should just
1117 			 * move on to the next one.
1118 			 */
1119 			mutex_enter(&ic->ic_state_mutex);
1120 			if (ic->ic_ffp) {
1121 				idm_conn_hold(ic);
1122 			} else {
1123 				mutex_exit(&ic->ic_state_mutex);
1124 				continue;
1125 			}
1126 
1127 			/*
1128 			 * If there hasn't been any activity on this
1129 			 * connection for the keepalive timeout period
1130 			 * and if the client has provided a keepalive
1131 			 * callback then call the keepalive callback.
1132 			 * This allows the client to take action to keep
1133 			 * the link alive (like send a nop PDU).
1134 			 */
1135 			if ((TICK_TO_SEC(idle_time) >=
1136 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
1137 			    !ic->ic_keepalive) {
1138 				ic->ic_keepalive = B_TRUE;
1139 				if (ic->ic_conn_ops.icb_keepalive) {
1140 					mutex_exit(&ic->ic_state_mutex);
1141 					mutex_exit(&idm.idm_global_mutex);
1142 					(*ic->ic_conn_ops.icb_keepalive)(ic);
1143 					mutex_enter(&idm.idm_global_mutex);
1144 					mutex_enter(&ic->ic_state_mutex);
1145 				}
1146 			} else if ((TICK_TO_SEC(idle_time) <
1147 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
1148 				/* Reset keepalive */
1149 				ic->ic_keepalive = B_FALSE;
1150 			}
1151 
1152 			/*
1153 			 * If there hasn't been any activity on this
1154 			 * connection for the failure timeout period then
1155 			 * drop the connection.  We expect the initiator
1156 			 * to keep the connection alive if it wants the
1157 			 * connection to stay open.
1158 			 *
1159 			 * If it turns out to be desirable to take a
1160 			 * more active role in maintaining the connection,
1161 			 * we could add a client callback to send
1162 			 * a "keepalive" kind of message (no doubt a nop)
1163 			 * and fire that on a shorter timer.
1164 			 */
1165 			if (TICK_TO_SEC(idle_time) >
1166 			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
1167 				mutex_exit(&ic->ic_state_mutex);
1168 				mutex_exit(&idm.idm_global_mutex);
1169 				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
1170 				    "conn %p idle for %d seconds, "
1171 				    "sending CE_TRANSPORT_FAIL",
1172 				    (void *)ic, (int)TICK_TO_SEC(idle_time));
1173 				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
1174 				mutex_enter(&idm.idm_global_mutex);
1175 				mutex_enter(&ic->ic_state_mutex);
1176 			}
1177 
1178 			idm_conn_rele(ic);
1179 
1180 			mutex_exit(&ic->ic_state_mutex);
1181 		}
1182 
1183 		(void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
1184 		    wake_time, TR_CLOCK_TICK);
1185 	}
1186 	mutex_exit(&idm.idm_global_mutex);
1187 
1188 	thread_exit();
1189 }
1190