xref: /titanic_41/usr/src/uts/common/io/idm/idm_impl.c (revision dd49f125507979bb2ab505a8daf2a46d1be27051)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/cpuvar.h>
31 #include <sys/sdt.h>
32 
33 #include <sys/socket.h>
34 #include <sys/strsubr.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysmacros.h>
37 
38 #include <sys/idm/idm.h>
39 #include <sys/idm/idm_so.h>
40 #include <hd_crc.h>
41 
42 extern idm_transport_t  idm_transport_list[];
43 /*
44  * -1 - uninitialized
45  * 0  - hardware-assisted CRC32c available
46  * 1  - hardware CRC32c not available, use the software table
47  */
48 static int iscsi_crc32_hd = -1;
49 
50 void
51 idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
52 {
53 	iscsi_async_evt_hdr_t *async_evt;
54 
55 	/*
56 	 * If we are in full-featured mode then route SCSI-related
57 	 * commands to the appropriate function vector
58 	 */
59 	ic->ic_timestamp = ddi_get_lbolt();
60 	mutex_enter(&ic->ic_state_mutex);
61 	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
62 		mutex_exit(&ic->ic_state_mutex);
63 
64 		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
65 			/* Forwarded SCSI-related commands */
66 			return;
67 		}
68 		mutex_enter(&ic->ic_state_mutex);
69 	}
70 
71 	/*
72 	 * If we get here with a SCSI-related PDU then we are not in
73 	 * full-feature mode and the PDU is a protocol error (SCSI command
74 	 * PDU's may sometimes be an exception, see below).  All
75 	 * non-SCSI PDU's get treated them the same regardless of whether
76 	 * non-SCSI PDU's are treated the same regardless of whether
77 	 *
78 	 * Look at the opcode and in some cases the PDU status and
79 	 * determine the appropriate event to send to the connection
80 	 * state machine.  Generate the event, passing the PDU as data.
81 	 * If the current connection state allows reception of the event
82 	 * the PDU will be submitted to the IDM client for processing,
83 	 * otherwise the PDU will be dropped.
84 	 */
85 	switch (IDM_PDU_OPCODE(pdu)) {
86 	case ISCSI_OP_LOGIN_CMD:
87 		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
88 		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
89 		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
90 		break;
91 	case ISCSI_OP_LOGIN_RSP:
92 		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
93 		break;
94 	case ISCSI_OP_LOGOUT_CMD:
95 		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
96 		    iscsi_logout_hdr_t *,
97 		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
98 		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
99 		break;
100 	case ISCSI_OP_LOGOUT_RSP:
101 		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
102 		break;
103 	case ISCSI_OP_ASYNC_EVENT:
104 		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
105 		switch (async_evt->async_event) {
106 		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
107 			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
108 			    (uintptr_t)pdu);
109 			break;
110 		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
111 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
112 			    (uintptr_t)pdu);
113 			break;
114 		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
115 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
116 			    (uintptr_t)pdu);
117 			break;
118 		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
119 		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
120 		default:
121 			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
122 			    (uintptr_t)pdu);
123 			break;
124 		}
125 		break;
126 	case ISCSI_OP_SCSI_CMD:
127 		/*
128 		 * Consider this scenario:  We are a target connection
129 		 * in "in login" state and a "login success sent" event has
130 		 * been generated but not yet handled.  Since we've sent
131 		 * the login response but we haven't actually transitioned
132 		 * to FFP mode we might conceivably receive a SCSI command
133 		 * from the initiator before we are ready.  We are actually
134 		 * in FFP, we just don't know it yet -- to address this we
135 		 * can generate an event corresponding to the SCSI command.
136 		 * At the point when the event is handled by the state
137 		 * machine the login request will have been handled and we
138 		 * should be in FFP.  If we are not in FFP by that time
139 		 * we can reject the SCSI command with a protocol error.
140 		 *
141 		 * This scenario only applies to the target.
142 		 *
143 		 * The dtrace probe for this opcode is handled in iscsit so
144 		 * we can find all the pieces of the CDB
145 		 */
146 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
147 		break;
148 	case ISCSI_OP_SCSI_DATA:
149 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
150 		    iscsi_data_hdr_t *,
151 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
152 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
153 		break;
154 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
155 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
156 		    iscsi_scsi_task_mgt_hdr_t *,
157 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
158 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
159 		break;
160 	case ISCSI_OP_NOOP_OUT:
161 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
162 		    iscsi_nop_out_hdr_t *,
163 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
164 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
165 		break;
166 	case ISCSI_OP_TEXT_CMD:
167 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
168 		    iscsi_text_hdr_t *,
169 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
170 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
171 		break;
172 	/* PDU's normally received by the initiator */
173 	case ISCSI_OP_SCSI_DATA_RSP:
174 	case ISCSI_OP_RTT_RSP:
175 	case ISCSI_OP_SNACK_CMD:
176 	case ISCSI_OP_NOOP_IN:
177 	case ISCSI_OP_TEXT_RSP:
178 	case ISCSI_OP_REJECT_MSG:
179 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
180 		/* Validate received PDU against current state */
181 		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
182 		    (uintptr_t)pdu);
183 		break;
184 	}
185 	mutex_exit(&ic->ic_state_mutex);
186 }
187 
188 void
189 idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
190 {
191 	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
192 }
193 
194 boolean_t
195 idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
196 {
197 	/*
198 	 * If this is an FFP request, call the appropriate handler
199 	 * and return B_TRUE, otherwise return B_FALSE.
200 	 */
201 	switch (IDM_PDU_OPCODE(pdu)) {
202 	case ISCSI_OP_SCSI_CMD:
203 		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
204 		return (B_TRUE);
205 	case ISCSI_OP_SCSI_DATA:
206 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
207 		    iscsi_data_hdr_t *,
208 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
209 		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
210 		return (B_TRUE);
211 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
212 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
213 		    iscsi_scsi_task_mgt_hdr_t *,
214 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
215 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
216 		return (B_TRUE);
217 	case ISCSI_OP_NOOP_OUT:
218 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
219 		    iscsi_nop_out_hdr_t *,
220 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
221 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
222 		return (B_TRUE);
223 	case ISCSI_OP_TEXT_CMD:
224 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
225 		    iscsi_text_hdr_t *,
226 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
227 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
228 		return (B_TRUE);
229 		/* Initiator only */
230 	case ISCSI_OP_SCSI_RSP:
231 		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
232 		return (B_TRUE);
233 	case ISCSI_OP_SCSI_DATA_RSP:
234 		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
235 		return (B_TRUE);
236 	case ISCSI_OP_RTT_RSP:
237 		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
238 		return (B_TRUE);
239 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
240 	case ISCSI_OP_TEXT_RSP:
241 	case ISCSI_OP_NOOP_IN:
242 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
243 		return (B_TRUE);
244 	default:
245 		return (B_FALSE);
246 	}
247 	/*NOTREACHED*/
248 }
249 
250 void
251 idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
252 {
253 	/*
254 	 * Some PDU's specific to FFP get special handling.  This function
255 	 * will normally never be called in FFP with an FFP PDU since this
256 	 * is a slow path, but it can happen on the target side during
257 	 * the transition to FFP.  We primarily call
258 	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
259 	 */
260 	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
261 		/*
262 		 * Non-FFP PDU, use the generic RX handler
263 		 */
264 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
265 	}
266 }
267 
268 void
269 idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
270 {
271 	iscsi_login_rsp_hdr_t	*login_rsp =
272 	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
273 	idm_conn_event_t	new_event;
274 
275 	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
276 		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
277 		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
278 		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
279 		    ISCSI_FULL_FEATURE_PHASE)) {
280 			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
281 			    CE_LOGIN_SUCCESS_SND);
282 		} else {
283 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
284 		}
285 	} else if (rx && login_rsp->status_class ==
286 	    ISCSI_STATUS_CLASS_REDIRECT) {
287 		new_event = CE_MISC_RX;
288 	} else {
289 		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
290 	}
291 
292 	if (rx) {
293 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
294 	} else {
295 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
296 	}
297 }
298 
299 
300 void
301 idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
302 {
303 	iscsi_logout_hdr_t 	*logout_req =
304 	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
305 	idm_conn_event_t	new_event;
306 	uint8_t			reason =
307 	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);
308 
309 	/*
310 	 *	For a normal logout (close connection or close session) IDM
311 	 *	will terminate processing of all tasks, completing the tasks
312 	 *	back to the client with a status indicating the connection
313 	 *	was logged out.  These tasks do not complete normally.
314 	 *
315 	 *	For a "close connection for recovery" logout, IDM suspends
316 	 *	processing of all tasks and completes them back to the client
317 	 *	with a status indicating the connection was logged out for
318 	 *	recovery.  Both initiator and target hang onto these tasks.
319 	 *	When we add ERL2 support IDM will need to provide mechanisms
320 	 *	to change the task and buffer associations to a new connection.
321 	 *
322 	 *	This code doesn't address the possibility of MC/S.  We'll
323 	 *	need to decide how the separate connections get handled
324 	 *	in that case.  One simple option is to make the client
325 	 *	generate the events for the other connections.
326 	 */
327 	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
328 		new_event =
329 		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
330 	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
331 	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
332 		/* Check logout CID against this connection's CID */
333 		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
334 			/* Logout is for this connection */
335 			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
336 			    CE_LOGOUT_THIS_CONN_SND);
337 		} else {
338 			/*
339 			 * Logout affects another connection.  This is not
340 			 * a relevant event for this connection so we'll
341 			 * just treat it as a normal PDU event.  Client
342 			 * will need to lookup the other connection and
343 			 * will need to look up the other connection and
344 			 */
345 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
346 		}
347 	} else {
348 		/* Invalid reason code */
349 		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
350 	}
351 
352 	if (rx) {
353 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
354 	} else {
355 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
356 	}
357 }
358 
359 
360 
361 void
362 idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
363 {
364 	idm_conn_event_t	new_event;
365 	iscsi_logout_rsp_hdr_t *logout_rsp =
366 	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;
367 
368 	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
369 		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
370 	} else {
371 		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
372 	}
373 
374 	if (rx) {
375 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
376 	} else {
377 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
378 	}
379 }
380 
381 /*
382  * idm_svc_conn_create()
383  * Transport-agnostic service connection creation, invoked from the transport
384  * layer.
385  */
386 idm_status_t
387 idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
388     idm_conn_t **ic_result)
389 {
390 	idm_conn_t	*ic;
391 	idm_status_t	rc;
392 
393 	/*
394 	 * Skip some work if we can already tell we are going offline.
395 	 * Otherwise we would just end up destroying this connection later
396 	 * as part of shutting down the svc.
397 	 */
398 	mutex_enter(&is->is_mutex);
399 	if (!is->is_online) {
400 		mutex_exit(&is->is_mutex);
401 		return (IDM_STATUS_FAIL);
402 	}
403 	mutex_exit(&is->is_mutex);
404 
405 	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
406 	    &is->is_svc_req.sr_conn_ops);
407 	if (ic == NULL) {
408 		return (IDM_STATUS_FAIL);
409 	}
410 	ic->ic_svc_binding = is;
411 
412 	/*
413 	 * Prepare connection state machine
414 	 */
415 	if ((rc = idm_conn_sm_init(ic)) != 0) {
416 		idm_conn_destroy_common(ic);
417 		return (rc);
418 	}
419 
420 
421 	*ic_result = ic;
422 
423 	mutex_enter(&idm.idm_global_mutex);
424 	list_insert_tail(&idm.idm_tgt_conn_list, ic);
425 	idm.idm_tgt_conn_count++;
426 	mutex_exit(&idm.idm_global_mutex);
427 
428 	return (IDM_STATUS_SUCCESS);
429 }
430 
431 void
432 idm_svc_conn_destroy(idm_conn_t *ic)
433 {
434 	mutex_enter(&idm.idm_global_mutex);
435 	list_remove(&idm.idm_tgt_conn_list, ic);
436 	idm.idm_tgt_conn_count--;
437 	mutex_exit(&idm.idm_global_mutex);
438 
439 	if (ic->ic_transport_private != NULL) {
440 		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
441 	}
442 	idm_conn_destroy_common(ic);
443 }
444 
445 /*
446  * idm_conn_create_common()
447  *
448  * Allocate and initialize IDM connection context
449  */
450 idm_conn_t *
451 idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
452     idm_conn_ops_t *conn_ops)
453 {
454 	idm_conn_t		*ic;
455 	idm_transport_t		*it;
456 	idm_transport_type_t	type;
457 
458 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
459 		it = &idm_transport_list[type];
460 
461 		if ((it->it_ops != NULL) && (it->it_type == tt))
462 			break;
463 	}
464 	ASSERT(it->it_type == tt);
465 	if (it->it_type != tt)
466 		return (NULL);
467 
468 	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);
469 
470 	/* Initialize data */
471 	ic->ic_target_name[0] = '\0';
472 	ic->ic_initiator_name[0] = '\0';
473 	ic->ic_isid[0] = '\0';
474 	ic->ic_tsih[0] = '\0';
475 	ic->ic_conn_type = conn_type;
476 	ic->ic_conn_ops = *conn_ops;
477 	ic->ic_transport_ops = it->it_ops;
478 	ic->ic_transport_type = tt;
479 	ic->ic_transport_private = NULL; /* Set by transport service */
480 	ic->ic_internal_cid = idm_cid_alloc();
481 	if (ic->ic_internal_cid == 0) {
482 		kmem_free(ic, sizeof (idm_conn_t));
483 		return (NULL);
484 	}
485 	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
486 	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
487 	idm_refcnt_init(&ic->ic_refcnt, ic);
488 
489 	return (ic);
490 }
491 
492 void
493 idm_conn_destroy_common(idm_conn_t *ic)
494 {
495 	idm_conn_sm_fini(ic);
496 	idm_refcnt_destroy(&ic->ic_refcnt);
497 	cv_destroy(&ic->ic_cv);
498 	mutex_destroy(&ic->ic_mutex);
499 	idm_cid_free(ic->ic_internal_cid);
500 
501 	kmem_free(ic, sizeof (idm_conn_t));
502 }
503 
504 /*
505  * Invoked from the SM as a result of client's invocation of
506  * idm_ini_conn_connect()
507  */
508 idm_status_t
509 idm_ini_conn_finish(idm_conn_t *ic)
510 {
511 	/* invoke the transport-specific connect routine */
512 	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
513 }
514 
515 idm_status_t
516 idm_tgt_conn_finish(idm_conn_t *ic)
517 {
518 	idm_status_t rc;
519 
520 	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
521 	if (rc != IDM_STATUS_SUCCESS) {
522 		return (IDM_STATUS_REJECT);
523 	}
524 
525 	/* Target client is ready to receive a login, start connection */
526 	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
527 }
528 
529 idm_transport_t *
530 idm_transport_lookup(idm_conn_req_t *cr)
531 {
532 	idm_transport_type_t	type;
533 	idm_transport_t		*it;
534 	idm_transport_caps_t	caps;
535 
536 	/*
537 	 * Make sure all available transports are setup.  We call this now
538 	 * instead of at initialization time in case IB has become available
539 	 * since we started (hotplug, etc).
540 	 */
541 	idm_transport_setup(cr->cr_li, cr->cr_boot_conn);
542 
543 	/* Determine the transport for this connection */
544 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
545 		it = &idm_transport_list[type];
546 
547 		if (it->it_ops == NULL) {
548 			/* transport is not registered */
549 			continue;
550 		}
551 
552 		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
553 			return (it);
554 		}
555 	}
556 
557 	ASSERT(0);
558 	return (NULL); /* Make gcc happy */
559 }
560 
561 void
562 idm_transport_setup(ldi_ident_t li, boolean_t boot_conn)
563 {
564 	idm_transport_type_t	type;
565 	idm_transport_t		*it;
566 	int			rc;
567 
568 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
569 		it = &idm_transport_list[type];
570 		/*
571 		 * We may want to store the LDI handle in the idm_svc_t
572 		 * and then allow multiple calls to ldi_open_by_name.  This
573 		 * would enable the LDI code to track who has the device open
574 		 * which could be useful in the case where we have multiple
575 		 * services and perhaps also have initiator and target opening
576 		 * the transport simultaneously.  For now we stick with the
577 		 * simpler plan of a single open per transport.
578 		 */
579 		if (it->it_ops == NULL) {
580 			/* transport is not ready, try to initialize it */
581 			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
582 				idm_so_init(it);
583 			} else {
584 				if (boot_conn == B_TRUE) {
585 					/*
586 					 * iSCSI boot doesn't need iSER.
587 					 * Opening iSER here may drive I/O to
588 					 * a failed session and cause a
589 					 * deadlock.
590 					 */
591 					continue;
592 				}
593 				rc = ldi_open_by_name(it->it_device_path,
594 				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
595 				/*
596 				 * If the open is successful we will have
597 				 * filled in the LDI handle in the transport
598 				 * table and we expect that the transport
599 				 * registered itself.
600 				 */
601 				if (rc != 0) {
602 					it->it_ldi_hdl = NULL;
603 				}
604 			}
605 		}
606 	}
607 }
608 
609 void
610 idm_transport_teardown()
611 {
612 	idm_transport_type_t	type;
613 	idm_transport_t		*it;
614 
615 	ASSERT(mutex_owned(&idm.idm_global_mutex));
616 
617 	/* Caller holds the IDM global mutex */
618 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
619 		it = &idm_transport_list[type];
620 		/* If we have an open LDI handle on this driver, close it */
621 		if (it->it_ldi_hdl != NULL) {
622 			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
623 			it->it_ldi_hdl = NULL;
624 		}
625 	}
626 }
627 
628 /*
629  * ID pool code.  We use this to generate unique structure identifiers without
630  * searching the existing structures.  This avoids the need to lock entire
631  * sets of structures at inopportune times.  Adapted from the CIFS server code.
632  *
633  *    A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
634  *    A bit set to '1' indicates that that particular value has been allocated.
635  *    Allocation is done by shifting a bit through the whole bitmap.
636  *    The current position of that index bit is kept in the idm_idpool_t
637  *    structure and represented by a byte index (0 to buffer size minus 1) and
638  *    a bit index (0 to 7).
639  *
640  *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
641  *    out of IDs its current size is doubled until it reaches its maximum size
642  *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
643  *    means that a pool can have a maximum number of 65534 IDs available.
644  */
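
/*
 * Illustrative sketch (not part of the original file): an ID maps into the
 * bitmap with simple shift/mask arithmetic -- byte index id >> 3, bit index
 * id & 7 -- which is exactly the test idm_idpool_free() below performs.
 * For example, ID 19 lives in byte 2, bit 3, so it is currently allocated
 * iff (id_pool[2] & (1 << 3)) is non-zero.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static boolean_t
idm_idpool_is_allocated(idm_idpool_t *pool, uint16_t id)
{
	return ((pool->id_pool[id >> 3] & (1 << (id & 7))) != 0);
}
#endif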
645 
646 static int
647 idm_idpool_increment(
648     idm_idpool_t	*pool)
649 {
650 	uint8_t		*new_pool;
651 	uint32_t	new_size;
652 
653 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
654 
655 	new_size = pool->id_size * 2;
656 	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
657 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
658 		if (new_pool) {
659 			bzero(new_pool, new_size / 8);
660 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
661 			kmem_free(pool->id_pool, pool->id_size / 8);
662 			pool->id_pool = new_pool;
663 			pool->id_free_counter += new_size - pool->id_size;
664 			pool->id_max_free_counter += new_size - pool->id_size;
665 			pool->id_size = new_size;
666 			pool->id_idx_msk = (new_size / 8) - 1;
667 			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
668 				/* id 65535 (-1) made unavailable */
669 				pool->id_pool[pool->id_idx_msk] = 0x80;
670 				pool->id_free_counter--;
671 				pool->id_max_free_counter--;
672 			}
673 			return (0);
674 		}
675 	}
676 	return (-1);
677 }
678 
679 /*
680  * idm_idpool_create
681  *
682  * This function initializes the pool structure provided.
683  */
684 
685 int
686 idm_idpool_create(idm_idpool_t *pool)
687 {
688 
689 	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);
690 
691 	pool->id_size = IDM_IDPOOL_MIN_SIZE;
692 	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
693 	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
694 	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
695 	pool->id_bit = 0x02;
696 	pool->id_bit_idx = 1;
697 	pool->id_idx = 0;
698 	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
699 	    KM_SLEEP);
700 	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
702 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
703 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
704 	pool->id_magic = IDM_IDPOOL_MAGIC;
705 	return (0);
706 }
707 
708 /*
709  * idm_idpool_destroy
710  *
711  * This function tears down and frees the resources associated with the
712  * pool provided.
713  */
714 
715 void
716 idm_idpool_destroy(idm_idpool_t *pool)
717 {
718 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
719 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
720 	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
721 	mutex_destroy(&pool->id_mutex);
722 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
723 }
724 
725 /*
726  * idm_idpool_alloc
727  *
728  * This function allocates an ID from the pool provided.
729  */
730 int
731 idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
732 {
733 	uint32_t	i;
734 	uint8_t		bit;
735 	uint8_t		bit_idx;
736 	uint8_t		byte;
737 
738 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
739 
740 	mutex_enter(&pool->id_mutex);
741 	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
742 		mutex_exit(&pool->id_mutex);
743 		return (-1);
744 	}
745 
746 	i = pool->id_size;
747 	while (i) {
748 		bit = pool->id_bit;
749 		bit_idx = pool->id_bit_idx;
750 		byte = pool->id_pool[pool->id_idx];
751 		while (bit) {
752 			if (byte & bit) {
753 				bit = bit << 1;
754 				bit_idx++;
755 				continue;
756 			}
757 			pool->id_pool[pool->id_idx] |= bit;
758 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
759 			pool->id_free_counter--;
760 			pool->id_bit = bit;
761 			pool->id_bit_idx = bit_idx;
762 			mutex_exit(&pool->id_mutex);
763 			return (0);
764 		}
765 		pool->id_bit = 1;
766 		pool->id_bit_idx = 0;
767 		pool->id_idx++;
768 		pool->id_idx &= pool->id_idx_msk;
769 		--i;
770 	}
771 	/*
772 	 * This section of code shouldn't be reached. If there are IDs
773 	 * available and none could be found there's a problem.
774 	 */
775 	ASSERT(0);
776 	mutex_exit(&pool->id_mutex);
777 	return (-1);
778 }
779 
780 /*
781  * idm_idpool_free
782  *
783  * This function frees the ID provided.
784  */
785 void
786 idm_idpool_free(idm_idpool_t *pool, uint16_t id)
787 {
788 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
789 	ASSERT(id != 0);
790 	ASSERT(id != 0xFFFF);
791 
792 	mutex_enter(&pool->id_mutex);
793 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
794 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
795 		pool->id_free_counter++;
796 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
797 		mutex_exit(&pool->id_mutex);
798 		return;
799 	}
800 	/* Freeing a free ID. */
801 	ASSERT(0);
802 	mutex_exit(&pool->id_mutex);
803 }
804 
805 uint32_t
806 idm_cid_alloc(void)
807 {
808 	/*
809 	 * ID pool works with 16-bit identifiers right now.  That should
810 	 * be plenty since we will probably never have more than 2^16
811 	 * connections simultaneously.
812 	 */
813 	uint16_t cid16;
814 
815 	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
816 		return (0); /* Fail */
817 	}
818 
819 	return ((uint32_t)cid16);
820 }
821 
822 void
823 idm_cid_free(uint32_t cid)
824 {
825 	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
826 }
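
/*
 * Illustrative sketch (not part of the original file): the expected lifecycle
 * of an ID pool as used by the connection ID allocator above.  The function
 * and local names are hypothetical; the real pool (idm.idm_conn_id_pool) is
 * created and destroyed with the rest of the IDM global state.
 */
#if 0	/* example only, never compiled */
static void
example_idpool_lifecycle(void)
{
	idm_idpool_t	pool;
	uint16_t	id;

	(void) idm_idpool_create(&pool);
	if (idm_idpool_alloc(&pool, &id) == 0) {
		/* id is in the range 1..65534 */
		idm_idpool_free(&pool, id);
	}
	idm_idpool_destroy(&pool);
}
#endif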
827 
828 
829 /*
830  * Code for generating the header and data digests
831  *
832  * This is the CRC-32C table
833  * Generated with:
834  * width = 32 bits
835  * poly = 0x1EDC6F41
836  * reflect input bytes = true
837  * reflect output bytes = true
838  */
839 
840 uint32_t idm_crc32c_table[256] =
841 {
842 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
843 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
844 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
845 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
846 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
847 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
848 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
849 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
850 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
851 	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
852 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
853 	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
854 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
855 	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
856 	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
857 	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
858 	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
859 	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
860 	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
861 	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
862 	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
863 	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
864 	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
865 	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
866 	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
867 	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
868 	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
869 	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
870 	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
871 	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
872 	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
873 	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
874 	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
875 	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
876 	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
877 	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
878 	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
879 	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
880 	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
881 	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
882 	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
883 	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
884 	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
885 	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
886 	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
887 	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
888 	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
889 	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
890 	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
891 	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
892 	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
893 	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
894 	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
895 	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
896 	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
897 	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
898 	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
899 	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
900 	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
901 	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
902 	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
903 	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
904 	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
905 	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
906 };
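
/*
 * Illustrative sketch (not part of the original file): a table like the one
 * above can be generated from the bit-reflected form of the CRC-32C
 * polynomial (0x1EDC6F41 reflected is 0x82F63B78); entry 1 works out to
 * 0xF26B8303, matching idm_crc32c_table[1].  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void
idm_crc32c_build_table(uint32_t table[256])
{
	uint32_t i, j, crc;

	for (i = 0; i < 256; i++) {
		crc = i;
		for (j = 0; j < 8; j++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : crc >> 1;
		table[i] = crc;
	}
}
#endif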
907 
908 /*
909  * idm_crc32c - Steps through the buffer one byte at a time, calculating
910  * the reflected CRC using the table.
911  */
912 uint32_t
913 idm_crc32c(void *address, unsigned long length)
914 {
915 	uint8_t *buffer = address;
916 	uint32_t crc = 0xffffffff, result;
917 #ifdef _BIG_ENDIAN
918 	uint8_t byte0, byte1, byte2, byte3;
919 #endif
920 
921 	ASSERT(address != NULL);
922 
923 	if (iscsi_crc32_hd == -1) {
924 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
925 			iscsi_crc32_hd = 0;
926 		} else {
927 			iscsi_crc32_hd = 1;
928 		}
929 	}
930 	if (iscsi_crc32_hd == 0)
931 		return (HW_CRC32(buffer, length, crc));
932 
933 	while (length--) {
934 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
935 		    (crc >> 8);
936 	}
937 	result = crc ^ 0xffffffff;
938 
939 #ifdef	_BIG_ENDIAN
940 	byte0 = (uint8_t)(result & 0xFF);
941 	byte1 = (uint8_t)((result >> 8) & 0xFF);
942 	byte2 = (uint8_t)((result >> 16) & 0xFF);
943 	byte3 = (uint8_t)((result >> 24) & 0xFF);
944 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
945 #endif	/* _BIG_ENDIAN */
946 
947 	return (result);
948 }
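
/*
 * Illustrative sketch (not part of the original file): how a caller might use
 * idm_crc32c() to check an iSCSI header digest over a 48-byte basic header
 * segment (assuming no additional header segments).  The function name and
 * its parameters are hypothetical.
 */
#if 0	/* example only, never compiled */
static boolean_t
example_check_header_digest(iscsi_hdr_t *hdr, uint32_t received_digest)
{
	/* digest covers the BHS; any AHS bytes would be included as well */
	return (idm_crc32c(hdr, sizeof (iscsi_hdr_t)) == received_digest);
}
#endif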
949 
950 
951 /*
952  * idm_crc32c_continued - Continues stepping through the buffer one
953  * byte at a time, calculating the reflected CRC using the table.
954  */
955 uint32_t
956 idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
957 {
958 	uint8_t *buffer = address;
959 	uint32_t result;
960 #ifdef	_BIG_ENDIAN
961 	uint8_t byte0, byte1, byte2, byte3;
962 #endif
963 
964 	ASSERT(address != NULL);
965 
966 	if (iscsi_crc32_hd == -1) {
967 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
968 			iscsi_crc32_hd = 0;
969 		} else {
970 			iscsi_crc32_hd = 1;
971 		}
972 	}
973 	if (iscsi_crc32_hd == 0)
974 		return (HW_CRC32_CONT(buffer, length, crc));
975 
976 
977 #ifdef	_BIG_ENDIAN
978 	byte0 = (uint8_t)((crc >> 24) & 0xFF);
979 	byte1 = (uint8_t)((crc >> 16) & 0xFF);
980 	byte2 = (uint8_t)((crc >> 8) & 0xFF);
981 	byte3 = (uint8_t)(crc & 0xFF);
982 	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
983 #endif
984 
985 	crc = crc ^ 0xffffffff;
986 	while (length--) {
987 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
988 		    (crc >> 8);
989 	}
990 	result = crc ^ 0xffffffff;
991 
992 #ifdef	_BIG_ENDIAN
993 	byte0 = (uint8_t)(result & 0xFF);
994 	byte1 = (uint8_t)((result >> 8) & 0xFF);
995 	byte2 = (uint8_t)((result >> 16) & 0xFF);
996 	byte3 = (uint8_t)((result >> 24) & 0xFF);
997 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
998 #endif
999 	return (result);
1000 }
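
/*
 * Illustrative sketch (not part of the original file): accumulating a single
 * CRC32C over a payload that arrives in two separate buffers, the way a data
 * digest can be built up incrementally.  The function name and its parameters
 * are hypothetical.
 */
#if 0	/* example only, never compiled */
static uint32_t
example_crc_two_chunks(void *buf1, size_t len1, void *buf2, size_t len2)
{
	uint32_t crc;

	crc = idm_crc32c(buf1, len1);
	/* crc now covers buf1; continue over buf2 */
	return (idm_crc32c_continued(buf2, len2, crc));
}
#endif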
1001 
1002 /* ARGSUSED */
1003 int
1004 idm_task_constructor(void *hdl, void *arg, int flags)
1005 {
1006 	idm_task_t *idt = (idm_task_t *)hdl;
1007 	uint32_t next_task;
1008 
1009 	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);
1010 
1011 	/* Find the next free task ID */
1012 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1013 	next_task = idm.idm_taskid_next;
1014 	while (idm.idm_taskid_table[next_task]) {
1015 		next_task++;
1016 		if (next_task == idm.idm_taskid_max)
1017 			next_task = 0;
1018 		if (next_task == idm.idm_taskid_next) {
1019 			rw_exit(&idm.idm_taskid_table_lock);
1020 			return (-1);
1021 		}
1022 	}
1023 
1024 	idm.idm_taskid_table[next_task] = idt;
1025 	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
1026 	rw_exit(&idm.idm_taskid_table_lock);
1027 
1028 	idt->idt_tt = next_task;
1029 
1030 	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
1031 	    offsetof(idm_buf_t, idb_buflink));
1032 	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
1033 	    offsetof(idm_buf_t, idb_buflink));
1034 	idm_refcnt_init(&idt->idt_refcnt, idt);
1035 
1036 	/*
1037 	 * Set the transport header pointer explicitly.  This removes the
1038 	 * need for per-transport header allocation, which simplifies cache
1039 	 * init considerably.  If at a later date we have an additional IDM
1040 	 * transport that requires a different size, we'll revisit this.
1041 	 */
1042 	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
1043 	idt->idt_flags = 0;
1044 	return (0);
1045 }
1046 
1047 /* ARGSUSED */
1048 void
1049 idm_task_destructor(void *hdl, void *arg)
1050 {
1051 	idm_task_t *idt = (idm_task_t *)hdl;
1052 
1053 	/* Remove the task from the ID table */
1054 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1055 	idm.idm_taskid_table[idt->idt_tt] = NULL;
1056 	rw_exit(&idm.idm_taskid_table_lock);
1057 
1058 	/* tear down the refcnt and the inbuf and outbuf lists */
1059 	idm_refcnt_destroy(&idt->idt_refcnt);
1060 	list_destroy(&idt->idt_inbufv);
1061 	list_destroy(&idt->idt_outbufv);
1062 
1063 	/*
1064 	 * The final call to idm_task_rele may happen with the task
1065 	 * mutex held which may invoke this destructor immediately.
1066 	 * Stall here until the task mutex owner lets go.
1067 	 */
1068 	mutex_enter(&idt->idt_mutex);
1069 	mutex_destroy(&idt->idt_mutex);
1070 }
1071 
1072 /*
1073  * idm_listbuf_insert searches from the back of the list looking for the
1074  * insertion point.
1075  */
1076 void
1077 idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
1078 {
1079 	idm_buf_t	*idb;
1080 
1081 	/* iterate through the list to find the insertion point */
1082 	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {
1083 
1084 		if (idb->idb_bufoffset < buf->idb_bufoffset) {
1085 
1086 			list_insert_after(lst, idb, buf);
1087 			return;
1088 		}
1089 	}
1090 
1091 	/* add the buf to the head of the list */
1092 	list_insert_head(lst, buf);
1093 
1094 }
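
/*
 * Illustrative example (not part of the original file): if buffers with
 * idb_bufoffset values 8192, 0 and 4096 are inserted in that arrival order,
 * the list ends up sorted by ascending offset: 0, 4096, 8192.
 */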
1095 
1096 /*ARGSUSED*/
1097 void
1098 idm_wd_thread(void *arg)
1099 {
1100 	idm_conn_t	*ic;
1101 	clock_t		wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
1102 	clock_t		idle_time;
1103 
1104 	/* Record the thread id for thread_join() */
1105 	idm.idm_wd_thread_did = curthread->t_did;
1106 	mutex_enter(&idm.idm_global_mutex);
1107 	idm.idm_wd_thread_running = B_TRUE;
1108 	cv_signal(&idm.idm_wd_cv);
1109 
1110 	while (idm.idm_wd_thread_running) {
1111 		for (ic = list_head(&idm.idm_tgt_conn_list);
1112 		    ic != NULL;
1113 		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
1114 			idle_time = ddi_get_lbolt() - ic->ic_timestamp;
1115 
1116 			/*
1117 			 * If this connection is in FFP then grab a hold
1118 			 * and check the various timeout thresholds.  Otherwise
1119 			 * the connection is closing and we should just
1120 			 * move on to the next one.
1121 			 */
1122 			mutex_enter(&ic->ic_state_mutex);
1123 			if (ic->ic_ffp) {
1124 				idm_conn_hold(ic);
1125 			} else {
1126 				mutex_exit(&ic->ic_state_mutex);
1127 				continue;
1128 			}
1129 
1130 			/*
1131 			 * If there hasn't been any activity on this
1132 			 * connection for the keepalive timeout period
1133 			 * and if the client has provided a keepalive
1134 			 * callback then call the keepalive callback.
1135 			 * This allows the client to take action to keep
1136 			 * the link alive (like send a nop PDU).
1137 			 */
1138 			if ((TICK_TO_SEC(idle_time) >=
1139 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
1140 			    !ic->ic_keepalive) {
1141 				ic->ic_keepalive = B_TRUE;
1142 				if (ic->ic_conn_ops.icb_keepalive) {
1143 					mutex_exit(&ic->ic_state_mutex);
1144 					mutex_exit(&idm.idm_global_mutex);
1145 					(*ic->ic_conn_ops.icb_keepalive)(ic);
1146 					mutex_enter(&idm.idm_global_mutex);
1147 					mutex_enter(&ic->ic_state_mutex);
1148 				}
1149 			} else if ((TICK_TO_SEC(idle_time) <
1150 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
1151 				/* Reset keepalive */
1152 				ic->ic_keepalive = B_FALSE;
1153 			}
1154 
1155 			/*
1156 			 * If there hasn't been any activity on this
1157 			 * connection for the failure timeout period then
1158 			 * drop the connection.  We expect the initiator
1159 			 * to keep the connection alive if it wants the
1160 			 * connection to stay open.
1161 			 *
1162 			 * If it turns out to be desirable to take a
1163 			 * more active role in maintaining the connection,
1164 			 * we could add a client callback to send
1165 			 * a "keepalive" kind of message (no doubt a nop)
1166 			 * and fire that on a shorter timer.
1167 			 */
1168 			if (TICK_TO_SEC(idle_time) >
1169 			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
1170 				mutex_exit(&ic->ic_state_mutex);
1171 				mutex_exit(&idm.idm_global_mutex);
1172 				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
1173 				    "conn %p idle for %d seconds, "
1174 				    "sending CE_TRANSPORT_FAIL",
1175 				    (void *)ic, (int)TICK_TO_SEC(idle_time));
1176 				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
1177 				mutex_enter(&idm.idm_global_mutex);
1178 				mutex_enter(&ic->ic_state_mutex);
1179 			}
1180 
1181 			idm_conn_rele(ic);
1182 
1183 			mutex_exit(&ic->ic_state_mutex);
1184 		}
1185 
1186 		(void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
1187 		    wake_time, TR_CLOCK_TICK);
1188 	}
1189 	mutex_exit(&idm.idm_global_mutex);
1190 
1191 	thread_exit();
1192 }
1193