xref: /titanic_52/usr/src/uts/common/io/idm/idm_impl.c (revision 3f7d54a6b84904c8f4d8daa4c7b577bede7df8b9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/conf.h>
28 #include <sys/file.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/cpuvar.h>
32 #include <sys/sdt.h>
33 
34 #include <sys/socket.h>
35 #include <sys/strsubr.h>
36 #include <sys/socketvar.h>
37 #include <sys/sysmacros.h>
38 
39 #include <sys/idm/idm.h>
40 #include <sys/idm/idm_so.h>
41 #include <hd_crc.h>
42 
43 extern idm_transport_t  idm_transport_list[];
44 /*
45  * -1 - uninitialized
46  * 0  - hardware CRC32 instruction available (use HW_CRC32)
47  * 1  - hardware CRC32 not available (use the lookup table)
48  */
49 static int iscsi_crc32_hd = -1;
50 
51 void
52 idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
53 {
54 	iscsi_async_evt_hdr_t *async_evt;
55 
56 	/*
57 	 * If we are in full-featured mode then route SCSI-related
58 	 * commands to the appropriate function vector
59 	 */
60 	ic->ic_timestamp = ddi_get_lbolt();
61 	mutex_enter(&ic->ic_state_mutex);
62 	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
63 		mutex_exit(&ic->ic_state_mutex);
64 
65 		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
66 			/* Forwarded SCSI-related commands */
67 			return;
68 		}
69 		mutex_enter(&ic->ic_state_mutex);
70 	}
71 
72 	/*
73 	 * If we get here with a SCSI-related PDU then we are not in
74 	 * full-feature mode and the PDU is a protocol error (SCSI command
75 	 * PDU's may sometimes be an exception, see below).  All
76 	 * non-SCSI PDU's are treated the same regardless of whether
77 	 * we are in full-feature mode.
78 	 *
79 	 * Look at the opcode and in some cases the PDU status and
80 	 * determine the appropriate event to send to the connection
81 	 * state machine.  Generate the event, passing the PDU as data.
82 	 * If the current connection state allows reception of the event
83 	 * the PDU will be submitted to the IDM client for processing,
84 	 * otherwise the PDU will be dropped.
85 	 */
86 	switch (IDM_PDU_OPCODE(pdu)) {
87 	case ISCSI_OP_LOGIN_CMD:
88 		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
89 		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
90 		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
91 		break;
92 	case ISCSI_OP_LOGIN_RSP:
93 		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
94 		break;
95 	case ISCSI_OP_LOGOUT_CMD:
96 		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
97 		    iscsi_logout_hdr_t *,
98 		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
99 		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
100 		break;
101 	case ISCSI_OP_LOGOUT_RSP:
102 		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
103 		break;
104 	case ISCSI_OP_ASYNC_EVENT:
105 		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
106 		switch (async_evt->async_event) {
107 		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
108 			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
109 			    (uintptr_t)pdu);
110 			break;
111 		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
112 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
113 			    (uintptr_t)pdu);
114 			break;
115 		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
116 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
117 			    (uintptr_t)pdu);
118 			break;
119 		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
120 		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
121 		default:
122 			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
123 			    (uintptr_t)pdu);
124 			break;
125 		}
126 		break;
127 	case ISCSI_OP_SCSI_CMD:
128 		/*
129 		 * Consider this scenario:  We are a target connection
130 		 * in "in login" state and a "login success sent" event has
131 		 * been generated but not yet handled.  Since we've sent
132 		 * the login response but we haven't actually transitioned
133 		 * to FFP mode we might conceivably receive a SCSI command
134 		 * from the initiator before we are ready.  We are actually
135 		 * in FFP, we just don't know it yet -- to address this we
136 		 * can generate an event corresponding to the SCSI command.
137 		 * At the point when the event is handled by the state
138 		 * machine the login request will have been handled and we
139 		 * should be in FFP.  If we are not in FFP by that time
140 		 * we can reject the SCSI command with a protocol error.
141 		 *
142 		 * This scenario only applies to the target.
143 		 *
144 		 * The DTrace probe for this opcode is handled in iscsit so
145 		 * we can find all the pieces of the CDB.
146 		 */
147 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
148 		break;
149 	case ISCSI_OP_SCSI_DATA:
150 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
151 		    iscsi_data_hdr_t *,
152 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
153 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
154 		break;
155 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
156 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
157 		    iscsi_scsi_task_mgt_hdr_t *,
158 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
159 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
160 		break;
161 	case ISCSI_OP_NOOP_OUT:
162 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
163 		    iscsi_nop_out_hdr_t *,
164 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
165 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
166 		break;
167 	case ISCSI_OP_TEXT_CMD:
168 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
169 		    iscsi_text_hdr_t *,
170 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
171 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
172 		break;
173 	/* Initiator PDU's */
174 	case ISCSI_OP_SCSI_DATA_RSP:
175 	case ISCSI_OP_RTT_RSP:
176 	case ISCSI_OP_SNACK_CMD:
177 	case ISCSI_OP_NOOP_IN:
178 	case ISCSI_OP_TEXT_RSP:
179 	case ISCSI_OP_REJECT_MSG:
180 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
181 		/* Validate received PDU against current state */
182 		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
183 		    (uintptr_t)pdu);
184 		break;
185 	}
186 	mutex_exit(&ic->ic_state_mutex);
187 }
188 
189 void
190 idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
191 {
192 	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
193 }
194 
195 boolean_t
196 idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
197 {
198 	/*
199 	 * If this is an FFP request, call the appropriate handler
200 	 * and return B_TRUE, otherwise return B_FALSE.
201 	 */
202 	switch (IDM_PDU_OPCODE(pdu)) {
203 	case ISCSI_OP_SCSI_CMD:
204 		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
205 		return (B_TRUE);
206 	case ISCSI_OP_SCSI_DATA:
207 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
208 		    iscsi_data_hdr_t *,
209 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
210 		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
211 		return (B_TRUE);
212 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
213 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
214 		    iscsi_scsi_task_mgt_hdr_t *,
215 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
216 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
217 		return (B_TRUE);
218 	case ISCSI_OP_NOOP_OUT:
219 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
220 		    iscsi_nop_out_hdr_t *,
221 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
222 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
223 		return (B_TRUE);
224 	case ISCSI_OP_TEXT_CMD:
225 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
226 		    iscsi_text_hdr_t *,
227 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
228 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
229 		return (B_TRUE);
230 		/* Initiator only */
231 	case ISCSI_OP_SCSI_RSP:
232 		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
233 		return (B_TRUE);
234 	case ISCSI_OP_SCSI_DATA_RSP:
235 		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
236 		return (B_TRUE);
237 	case ISCSI_OP_RTT_RSP:
238 		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
239 		return (B_TRUE);
240 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
241 	case ISCSI_OP_TEXT_RSP:
242 	case ISCSI_OP_NOOP_IN:
243 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
244 		return (B_TRUE);
245 	default:
246 		return (B_FALSE);
247 	}
248 	/*NOTREACHED*/
249 }
250 
251 void
252 idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
253 {
254 	/*
255 	 * Some PDU's specific to FFP get special handling.  This function
256 	 * will normally never be called in FFP with an FFP PDU since this
257 	 * is a slow path, but it can happen on the target side during
258 	 * the transition to FFP.  We primarily call
259 	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
260 	 */
261 	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
262 		/*
263 		 * Non-FFP PDU, use the generic RX handler
264 		 */
265 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
266 	}
267 }
268 
269 void
270 idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
271 {
272 	iscsi_login_rsp_hdr_t	*login_rsp =
273 	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
274 	idm_conn_event_t	new_event;
275 
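	/*
	 * A login response only counts as a "login success" event when it
	 * is the final response (no Continue bit), the Transit bit is set,
	 * and the next stage is the full feature phase.  Any other
	 * successful login response is treated as an ordinary PDU event.
	 */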
276 	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
277 		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
278 		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
279 		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
280 		    ISCSI_FULL_FEATURE_PHASE)) {
281 			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
282 			    CE_LOGIN_SUCCESS_SND);
283 		} else {
284 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
285 		}
286 	} else {
287 		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
288 	}
289 
290 	if (rx) {
291 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
292 	} else {
293 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
294 	}
295 }
296 
297 
298 void
299 idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
300 {
301 	iscsi_logout_hdr_t 	*logout_req =
302 	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
303 	idm_conn_event_t	new_event;
304 	uint8_t			reason =
305 	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);
306 
307 	/*
308 	 *	For a normal logout (close connection or close session) IDM
309 	 *	will terminate processing of all tasks, completing the tasks
310 	 *	back to the client with a status indicating the connection
311 	 *	was logged out.  These tasks do not get completed normally.
312 	 *
313 	 *	For a "close connection for recovery" logout, IDM suspends
314 	 *	processing of all tasks and completes them back to the client
315 	 *	with a status indicating the connection was logged out for
316 	 *	recovery.  Both initiator and target hang onto these tasks.
317 	 *	When we add ERL2 support IDM will need to provide mechanisms
318 	 *	to change the task and buffer associations to a new connection.
319 	 *
320 	 *	This code doesn't address the possibility of MC/S.  We'll
321 	 *	need to decide how the separate connections get handled
322 	 *	in that case.  One simple option is to make the client
323 	 *	generate the events for the other connections.
324 	 */
325 	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
326 		new_event =
327 		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
328 	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
329 	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
330 		/* Check logout CID against this connection's CID */
331 		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
332 			/* Logout is for this connection */
333 			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
334 			    CE_LOGOUT_THIS_CONN_SND);
335 		} else {
336 			/*
337 			 * Logout affects another connection.  This is not
338 			 * a relevant event for this connection so we'll
339 			 * just treat it as a normal PDU event.  Client
340 			 * will need to lookup the other connection and
341 			 * generate the event.
342 			 */
343 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
344 		}
345 	} else {
346 		/* Invalid reason code */
347 		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
348 	}
349 
350 	if (rx) {
351 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
352 	} else {
353 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
354 	}
355 }
356 
357 
358 
359 void
360 idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
361 {
362 	idm_conn_event_t	new_event;
363 	iscsi_logout_rsp_hdr_t *logout_rsp =
364 	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;
365 
366 	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
367 		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
368 	} else {
369 		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
370 	}
371 
372 	if (rx) {
373 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
374 	} else {
375 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
376 	}
377 }
378 
379 /*
380  * idm_svc_conn_create()
381  * Transport-agnostic service connection creation, invoked from the transport
382  * layer.
383  */
384 idm_status_t
385 idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
386     idm_conn_t **ic_result)
387 {
388 	idm_conn_t	*ic;
389 	idm_status_t	rc;
390 
391 	/*
392 	 * Skip some work if we can already tell we are going offline.
393 	 * Otherwise we will destroy this connection later as part of
394 	 * shutting down the svc.
395 	 */
396 	mutex_enter(&is->is_mutex);
397 	if (!is->is_online) {
398 		mutex_exit(&is->is_mutex);
399 		return (IDM_STATUS_FAIL);
400 	}
401 	mutex_exit(&is->is_mutex);
402 
403 	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
404 	    &is->is_svc_req.sr_conn_ops);
405 	if (ic == NULL) {
406 		return (IDM_STATUS_FAIL);
407 	}
408 	ic->ic_svc_binding = is;
409 
410 	/*
411 	 * Prepare connection state machine
412 	 */
413 	if ((rc = idm_conn_sm_init(ic)) != 0) {
414 		idm_conn_destroy_common(ic);
415 		return (rc);
416 	}
417 
418 
419 	*ic_result = ic;
420 
421 	mutex_enter(&idm.idm_global_mutex);
422 	list_insert_tail(&idm.idm_tgt_conn_list, ic);
423 	idm.idm_tgt_conn_count++;
424 	mutex_exit(&idm.idm_global_mutex);
425 
426 	return (IDM_STATUS_SUCCESS);
427 }
428 
429 void
430 idm_svc_conn_destroy(idm_conn_t *ic)
431 {
432 	mutex_enter(&idm.idm_global_mutex);
433 	list_remove(&idm.idm_tgt_conn_list, ic);
434 	idm.idm_tgt_conn_count--;
435 	mutex_exit(&idm.idm_global_mutex);
436 
437 	if (ic->ic_transport_private != NULL) {
438 		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
439 	}
440 	idm_conn_destroy_common(ic);
441 }
442 
443 /*
444  * idm_conn_create_common()
445  *
446  * Allocate and initialize IDM connection context
447  */
448 idm_conn_t *
449 idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
450     idm_conn_ops_t *conn_ops)
451 {
452 	idm_conn_t		*ic;
453 	idm_transport_t		*it;
454 	idm_transport_type_t	type;
455 
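	/*
	 * Find the registered transport that matches the requested type.
	 * If no transport of that type has registered, the ASSERT below
	 * fires on DEBUG kernels and the create fails on non-DEBUG ones.
	 */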
456 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
457 		it = &idm_transport_list[type];
458 
459 		if ((it->it_ops != NULL) && (it->it_type == tt))
460 			break;
461 	}
462 	ASSERT(it->it_type == tt);
463 	if (it->it_type != tt)
464 		return (NULL);
465 
466 	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);
467 
468 	/* Initialize data */
469 	ic->ic_target_name[0] = '\0';
470 	ic->ic_initiator_name[0] = '\0';
471 	ic->ic_isid[0] = '\0';
472 	ic->ic_tsih[0] = '\0';
473 	ic->ic_conn_type = conn_type;
474 	ic->ic_conn_ops = *conn_ops;
475 	ic->ic_transport_ops = it->it_ops;
476 	ic->ic_transport_type = tt;
477 	ic->ic_transport_private = NULL; /* Set by transport service */
478 	ic->ic_internal_cid = idm_cid_alloc();
479 	if (ic->ic_internal_cid == 0) {
480 		kmem_free(ic, sizeof (idm_conn_t));
481 		return (NULL);
482 	}
483 	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
484 	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
485 	idm_refcnt_init(&ic->ic_refcnt, ic);
486 
487 	return (ic);
488 }
489 
490 void
491 idm_conn_destroy_common(idm_conn_t *ic)
492 {
493 	idm_conn_sm_fini(ic);
494 	idm_refcnt_destroy(&ic->ic_refcnt);
495 	cv_destroy(&ic->ic_cv);
496 	mutex_destroy(&ic->ic_mutex);
497 	idm_cid_free(ic->ic_internal_cid);
498 
499 	kmem_free(ic, sizeof (idm_conn_t));
500 }
501 
502 /*
503  * Invoked from the SM as a result of client's invocation of
504  * idm_ini_conn_connect()
505  */
506 idm_status_t
507 idm_ini_conn_finish(idm_conn_t *ic)
508 {
509 	/* invoke the transport-specific connect routine */
510 	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
511 }
512 
513 idm_status_t
514 idm_tgt_conn_finish(idm_conn_t *ic)
515 {
516 	idm_status_t rc;
517 
518 	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
519 	if (rc != IDM_STATUS_SUCCESS) {
520 		return (IDM_STATUS_REJECT);
521 	}
522 
523 	/* Target client is ready to receive a login, start connection */
524 	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
525 }
526 
527 idm_transport_t *
528 idm_transport_lookup(idm_conn_req_t *cr)
529 {
530 	idm_transport_type_t	type;
531 	idm_transport_t		*it;
532 	idm_transport_caps_t	caps;
533 
534 	/*
535 	 * Make sure all available transports are setup.  We call this now
536 	 * instead of at initialization time in case IB has become available
537 	 * since we started (hotplug, etc).
538 	 */
539 	idm_transport_setup(cr->cr_li, cr->cr_boot_conn);
540 
541 	/* Determine the transport for this connection */
542 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
543 		it = &idm_transport_list[type];
544 
545 		if (it->it_ops == NULL) {
546 			/* transport is not registered */
547 			continue;
548 		}
549 
550 		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
551 			return (it);
552 		}
553 	}
554 
555 	ASSERT(0);
556 	return (NULL); /* Make gcc happy */
557 }
558 
559 void
560 idm_transport_setup(ldi_ident_t li, boolean_t boot_conn)
561 {
562 	idm_transport_type_t	type;
563 	idm_transport_t		*it;
564 	int			rc;
565 
566 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
567 		it = &idm_transport_list[type];
568 		/*
569 		 * We may want to store the LDI handle in the idm_svc_t
570 		 * and then allow multiple calls to ldi_open_by_name.  This
571 		 * would enable the LDI code to track who has the device open
572 		 * which could be useful in the case where we have multiple
573 		 * services and perhaps also have initiator and target opening
574 		 * the transport simultaneously.  For now we stick with the
575 		 * plan.
576 		 */
577 		if (it->it_ops == NULL) {
578 			/* transport is not ready, try to initialize it */
579 			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
580 				idm_so_init(it);
581 			} else {
582 				if (boot_conn == B_TRUE) {
583 					/*
584 					 * iSCSI boot doesn't need iSER.
585 					 * Opening iSER here may drive I/O
586 					 * to a failed session and cause
587 					 * a deadlock.
588 					 */
589 					continue;
590 				}
591 				rc = ldi_open_by_name(it->it_device_path,
592 				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
593 				/*
594 				 * If the open is successful we will have
595 				 * filled in the LDI handle in the transport
596 				 * table and we expect that the transport
597 				 * registered itself.
598 				 */
599 				if (rc != 0) {
600 					it->it_ldi_hdl = NULL;
601 				}
602 			}
603 		}
604 	}
605 }
606 
607 void
608 idm_transport_teardown()
609 {
610 	idm_transport_type_t	type;
611 	idm_transport_t		*it;
612 
613 	ASSERT(mutex_owned(&idm.idm_global_mutex));
614 
615 	/* Caller holds the IDM global mutex */
616 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
617 		it = &idm_transport_list[type];
618 		/* If we have an open LDI handle on this driver, close it */
619 		if (it->it_ldi_hdl != NULL) {
620 			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
621 			it->it_ldi_hdl = NULL;
622 		}
623 	}
624 }
625 
626 /*
627  * ID pool code.  We use this to generate unique structure identifiers without
628  * searching the existing structures.  This avoids the need to lock entire
629  * sets of structures at inopportune times.  Adapted from the CIFS server code.
630  *
631  *    A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
632  *    A bit set to '1' indicates that that particular value has been allocated.
633  *    The allocation process is done by shifting a bit through the whole bitmap.
634  *    The current position of that index bit is kept in the idm_idpool_t
635  *    structure and represented by a byte index (0 to buffer size minus 1) and
636  *    a bit index (0 to 7).
637  *
638  *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
639  *    out of IDs its current size is doubled until it reaches its maximum size
640  *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
641  *    means that a pool can have a maximum number of 65534 IDs available.
642  */
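/*
 * A minimal usage sketch (illustrative only, not code used by the driver).
 * An allocated ID is always in the range 1..65534 since 0 and 65535 are
 * reserved by the pool itself:
 *
 *	idm_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) idm_idpool_create(&pool);
 *	if (idm_idpool_alloc(&pool, &id) == 0) {
 *		... use id ...
 *		idm_idpool_free(&pool, id);
 *	}
 *	idm_idpool_destroy(&pool);
 */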
643 
644 static int
645 idm_idpool_increment(
646     idm_idpool_t	*pool)
647 {
648 	uint8_t		*new_pool;
649 	uint32_t	new_size;
650 
651 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
652 
653 	new_size = pool->id_size * 2;
654 	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
655 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
656 		if (new_pool) {
657 			bzero(new_pool, new_size / 8);
658 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
659 			kmem_free(pool->id_pool, pool->id_size / 8);
660 			pool->id_pool = new_pool;
661 			pool->id_free_counter += new_size - pool->id_size;
662 			pool->id_max_free_counter += new_size - pool->id_size;
663 			pool->id_size = new_size;
664 			pool->id_idx_msk = (new_size / 8) - 1;
665 			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
666 				/* id 65535 (-1 as a uint16_t) made unavailable */
667 				pool->id_pool[pool->id_idx_msk] = 0x80;
668 				pool->id_free_counter--;
669 				pool->id_max_free_counter--;
670 			}
671 			return (0);
672 		}
673 	}
674 	return (-1);
675 }
676 
677 /*
678  * idm_idpool_constructor
679  * idm_idpool_create
680  * This function initializes the pool structure provided.
681  */
682 
683 int
684 idm_idpool_create(idm_idpool_t *pool)
685 {
686 
687 	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);
688 
689 	pool->id_size = IDM_IDPOOL_MIN_SIZE;
690 	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
691 	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
692 	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
693 	pool->id_bit = 0x02;
694 	pool->id_bit_idx = 1;
695 	pool->id_idx = 0;
696 	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
697 	    KM_SLEEP);
698 	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
699 	/* id 65535 (-1) is reserved in idm_idpool_increment() at max size */
700 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
701 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
702 	pool->id_magic = IDM_IDPOOL_MAGIC;
703 	return (0);
704 }
705 
706 /*
707  * idm_idpool_destructor
708  * idm_idpool_destroy
709  * This function tears down and frees the resources associated with the
710  * pool provided.
711  */
712 
713 void
714 idm_idpool_destroy(idm_idpool_t *pool)
715 {
716 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
717 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
718 	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
719 	mutex_destroy(&pool->id_mutex);
720 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
721 }
722 
723 /*
724  * idm_idpool_alloc
725  *
726  * This function allocates an ID from the pool provided.
727  */
728 int
729 idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
730 {
731 	uint32_t	i;
732 	uint8_t		bit;
733 	uint8_t		bit_idx;
734 	uint8_t		byte;
735 
736 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
737 
738 	mutex_enter(&pool->id_mutex);
739 	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
740 		mutex_exit(&pool->id_mutex);
741 		return (-1);
742 	}
743 
744 	i = pool->id_size;
745 	while (i) {
746 		bit = pool->id_bit;
747 		bit_idx = pool->id_bit_idx;
748 		byte = pool->id_pool[pool->id_idx];
749 		while (bit) {
750 			if (byte & bit) {
751 				bit = bit << 1;
752 				bit_idx++;
753 				continue;
754 			}
755 			pool->id_pool[pool->id_idx] |= bit;
756 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
757 			pool->id_free_counter--;
758 			pool->id_bit = bit;
759 			pool->id_bit_idx = bit_idx;
760 			mutex_exit(&pool->id_mutex);
761 			return (0);
762 		}
763 		pool->id_bit = 1;
764 		pool->id_bit_idx = 0;
765 		pool->id_idx++;
766 		pool->id_idx &= pool->id_idx_msk;
767 		--i;
768 	}
769 	/*
770 	 * This section of code shouldn't be reached. If there are IDs
771 	 * available and none could be found there's a problem.
772 	 */
773 	ASSERT(0);
774 	mutex_exit(&pool->id_mutex);
775 	return (-1);
776 }
777 
778 /*
779  * idm_idpool_free
780  *
781  * This function frees the ID provided.
782  */
783 void
784 idm_idpool_free(idm_idpool_t *pool, uint16_t id)
785 {
786 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
787 	ASSERT(id != 0);
788 	ASSERT(id != 0xFFFF);
789 
790 	mutex_enter(&pool->id_mutex);
791 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
792 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
793 		pool->id_free_counter++;
794 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
795 		mutex_exit(&pool->id_mutex);
796 		return;
797 	}
798 	/* Freeing a free ID. */
799 	ASSERT(0);
800 	mutex_exit(&pool->id_mutex);
801 }
802 
803 uint32_t
804 idm_cid_alloc(void)
805 {
806 	/*
807 	 * ID pool works with 16-bit identifiers right now.  That should
808 	 * be plenty since we will probably never have more than 2^16
809 	 * connections simultaneously.
810 	 */
811 	uint16_t cid16;
812 
813 	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
814 		return (0); /* Fail */
815 	}
816 
817 	return ((uint32_t)cid16);
818 }
819 
820 void
821 idm_cid_free(uint32_t cid)
822 {
823 	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
824 }
825 
826 
827 /*
828  * Code for generating the header and data digests
829  *
830  * This is the CRC-32C table
831  * Generated with:
832  * width = 32 bits
833  * poly = 0x1EDC6F41
834  * reflect input bytes = true
835  * reflect output bytes = true
836  */
837 
838 uint32_t idm_crc32c_table[256] =
839 {
840 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
841 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
842 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
843 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
844 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
845 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
846 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
847 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
848 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
849 	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
850 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
851 	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
852 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
853 	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
854 	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
855 	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
856 	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
857 	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
858 	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
859 	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
860 	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
861 	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
862 	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
863 	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
864 	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
865 	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
866 	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
867 	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
868 	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
869 	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
870 	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
871 	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
872 	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
873 	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
874 	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
875 	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
876 	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
877 	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
878 	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
879 	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
880 	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
881 	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
882 	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
883 	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
884 	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
885 	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
886 	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
887 	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
888 	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
889 	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
890 	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
891 	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
892 	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
893 	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
894 	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
895 	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
896 	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
897 	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
898 	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
899 	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
900 	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
901 	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
902 	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
903 	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
904 };
905 
906 /*
907  * idm_crc32c - Steps through the buffer one byte at a time and
908  * calculates the reflected CRC using the lookup table.
909  */
910 uint32_t
911 idm_crc32c(void *address, unsigned long length)
912 {
913 	uint8_t *buffer = address;
914 	uint32_t crc = 0xffffffff, result;
915 #ifdef _BIG_ENDIAN
916 	uint8_t byte0, byte1, byte2, byte3;
917 #endif
918 
919 	ASSERT(address != NULL);
920 
921 	if (iscsi_crc32_hd == -1) {
922 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
923 			iscsi_crc32_hd = 0;
924 		} else {
925 			iscsi_crc32_hd = 1;
926 		}
927 	}
928 	if (iscsi_crc32_hd == 0)
929 		return (HW_CRC32(buffer, length, crc));
930 
931 	while (length--) {
932 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
933 		    (crc >> 8);
934 	}
935 	result = crc ^ 0xffffffff;
936 
937 #ifdef	_BIG_ENDIAN
938 	byte0 = (uint8_t)(result & 0xFF);
939 	byte1 = (uint8_t)((result >> 8) & 0xFF);
940 	byte2 = (uint8_t)((result >> 16) & 0xFF);
941 	byte3 = (uint8_t)((result >> 24) & 0xFF);
942 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
943 #endif	/* _BIG_ENDIAN */
944 
945 	return (result);
946 }
947 
948 
949 /*
950  * idm_crc32c_continued - Continues stepping through the buffer one
951  * byte at a time, calculating the reflected CRC using the lookup table.
952  */
953 uint32_t
954 idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
955 {
956 	uint8_t *buffer = address;
957 	uint32_t result;
958 #ifdef	_BIG_ENDIAN
959 	uint8_t byte0, byte1, byte2, byte3;
960 #endif
961 
962 	ASSERT(address != NULL);
963 
964 	if (iscsi_crc32_hd == -1) {
965 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
966 			iscsi_crc32_hd = 0;
967 		} else {
968 			iscsi_crc32_hd = 1;
969 		}
970 	}
971 	if (iscsi_crc32_hd == 0)
972 		return (HW_CRC32_CONT(buffer, length, crc));
973 
974 
975 #ifdef	_BIG_ENDIAN
976 	byte0 = (uint8_t)((crc >> 24) & 0xFF);
977 	byte1 = (uint8_t)((crc >> 16) & 0xFF);
978 	byte2 = (uint8_t)((crc >> 8) & 0xFF);
979 	byte3 = (uint8_t)(crc & 0xFF);
980 	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
981 #endif
982 
983 	crc = crc ^ 0xffffffff;
984 	while (length--) {
985 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
986 		    (crc >> 8);
987 	}
988 	result = crc ^ 0xffffffff;
989 
990 #ifdef	_BIG_ENDIAN
991 	byte0 = (uint8_t)(result & 0xFF);
992 	byte1 = (uint8_t)((result >> 8) & 0xFF);
993 	byte2 = (uint8_t)((result >> 16) & 0xFF);
994 	byte3 = (uint8_t)((result >> 24) & 0xFF);
995 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
996 #endif
997 	return (result);
998 }
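
/*
 * Illustrative use of the two routines above (a sketch, not code from
 * elsewhere in this file): a digest over a contiguous buffer is simply
 *
 *	digest = idm_crc32c(buf, len);
 *
 * while a digest over discontiguous segments is accumulated by seeding
 * each subsequent call with the previous result:
 *
 *	digest = idm_crc32c(seg0, len0);
 *	digest = idm_crc32c_continued(seg1, len1, digest);
 */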
999 
1000 /* ARGSUSED */
1001 int
1002 idm_task_constructor(void *hdl, void *arg, int flags)
1003 {
1004 	idm_task_t *idt = (idm_task_t *)hdl;
1005 	uint32_t next_task;
1006 
1007 	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);
1008 
1009 	/* Find the next free task ID */
1010 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1011 	next_task = idm.idm_taskid_next;
1012 	while (idm.idm_taskid_table[next_task]) {
1013 		next_task++;
1014 		if (next_task == idm.idm_taskid_max)
1015 			next_task = 0;
1016 		if (next_task == idm.idm_taskid_next) {
1017 			rw_exit(&idm.idm_taskid_table_lock);
1018 			return (-1);
1019 		}
1020 	}
1021 
1022 	idm.idm_taskid_table[next_task] = idt;
1023 	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
1024 	rw_exit(&idm.idm_taskid_table_lock);
1025 
1026 	idt->idt_tt = next_task;
1027 
1028 	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
1029 	    offsetof(idm_buf_t, idb_buflink));
1030 	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
1031 	    offsetof(idm_buf_t, idb_buflink));
1032 	idm_refcnt_init(&idt->idt_refcnt, idt);
1033 
1034 	/*
1035 	 * Set the transport header pointer explicitly.  This removes the
1036 	 * need for per-transport header allocation, which simplifies cache
1037 	 * init considerably.  If at a later date we have an additional IDM
1038 	 * transport that requires a different size, we'll revisit this.
1039 	 */
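	/*
	 * This relies on the task cache object being allocated with extra
	 * space after each idm_task_t, large enough for the largest
	 * transport header; that is what makes the (idt + 1) arithmetic
	 * below safe.
	 */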
1040 	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
1041 	idt->idt_flags = 0;
1042 	return (0);
1043 }
1044 
1045 /* ARGSUSED */
1046 void
1047 idm_task_destructor(void *hdl, void *arg)
1048 {
1049 	idm_task_t *idt = (idm_task_t *)hdl;
1050 
1051 	/* Remove the task from the ID table */
1052 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1053 	idm.idm_taskid_table[idt->idt_tt] = NULL;
1054 	rw_exit(&idm.idm_taskid_table_lock);
1055 
1056 	/* tear down the refcnt and the inbuf/outbuf lists */
1057 	idm_refcnt_destroy(&idt->idt_refcnt);
1058 	list_destroy(&idt->idt_inbufv);
1059 	list_destroy(&idt->idt_outbufv);
1060 
1061 	/*
1062 	 * The final call to idm_task_rele may happen with the task
1063 	 * mutex held which may invoke this destructor immediately.
1064 	 * Stall here until the task mutex owner lets go.
1065 	 */
1066 	mutex_enter(&idt->idt_mutex);
1067 	mutex_destroy(&idt->idt_mutex);
1068 }
1069 
1070 /*
1071  * idm_listbuf_insert searches from the back of the list looking for the
1072  * insertion point.
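 * The list is thus kept sorted by increasing idb_bufoffset; searching from
 * the tail keeps the common case cheap when buffers are added in ascending
 * offset order, which is presumably the typical pattern.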
1073  */
1074 void
1075 idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
1076 {
1077 	idm_buf_t	*idb;
1078 
1079 	/* iterate through the list to find the insertion point */
1080 	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {
1081 
1082 		if (idb->idb_bufoffset < buf->idb_bufoffset) {
1083 
1084 			list_insert_after(lst, idb, buf);
1085 			return;
1086 		}
1087 	}
1088 
1089 	/* add the buf to the head of the list */
1090 	list_insert_head(lst, buf);
1091 
1092 }
1093 
1094 /*ARGSUSED*/
1095 void
1096 idm_wd_thread(void *arg)
1097 {
1098 	idm_conn_t	*ic;
1099 	clock_t		wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
1100 	clock_t		idle_time;
1101 
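	/*
	 * Once every IDM_WD_INTERVAL seconds this thread walks the target
	 * connection list and compares each connection's idle time against
	 * the keepalive and transport-failure thresholds checked below.
	 */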
1102 	/* Record the thread id for thread_join() */
1103 	idm.idm_wd_thread_did = curthread->t_did;
1104 	mutex_enter(&idm.idm_global_mutex);
1105 	idm.idm_wd_thread_running = B_TRUE;
1106 	cv_signal(&idm.idm_wd_cv);
1107 
1108 	while (idm.idm_wd_thread_running) {
1109 		for (ic = list_head(&idm.idm_tgt_conn_list);
1110 		    ic != NULL;
1111 		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
1112 			idle_time = ddi_get_lbolt() - ic->ic_timestamp;
1113 
1114 			/*
1115 			 * If this connection is in FFP then grab a hold
1116 			 * and check the various timeout thresholds.  Otherwise
1117 			 * the connection is closing and we should just
1118 			 * move on to the next one.
1119 			 */
1120 			mutex_enter(&ic->ic_state_mutex);
1121 			if (ic->ic_ffp) {
1122 				idm_conn_hold(ic);
1123 			} else {
1124 				mutex_exit(&ic->ic_state_mutex);
1125 				continue;
1126 			}
1127 
1128 			/*
1129 			 * If there hasn't been any activity on this
1130 			 * connection for the keepalive timeout period
1131 			 * and if the client has provided a keepalive
1132 			 * callback then call the keepalive callback.
1133 			 * This allows the client to take action to keep
1134 			 * the link alive (like sending a nop PDU).
1135 			 */
1136 			if ((TICK_TO_SEC(idle_time) >=
1137 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
1138 			    !ic->ic_keepalive) {
1139 				ic->ic_keepalive = B_TRUE;
1140 				if (ic->ic_conn_ops.icb_keepalive) {
1141 					mutex_exit(&ic->ic_state_mutex);
1142 					mutex_exit(&idm.idm_global_mutex);
1143 					(*ic->ic_conn_ops.icb_keepalive)(ic);
1144 					mutex_enter(&idm.idm_global_mutex);
1145 					mutex_enter(&ic->ic_state_mutex);
1146 				}
1147 			} else if ((TICK_TO_SEC(idle_time) <
1148 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
1149 				/* Reset keepalive */
1150 				ic->ic_keepalive = B_FALSE;
1151 			}
1152 
1153 			/*
1154 			 * If there hasn't been any activity on this
1155 			 * connection for the failure timeout period then
1156 			 * drop the connection.  We expect the initiator
1157 			 * to keep the connection alive if it wants the
1158 			 * connection to stay open.
1159 			 *
1160 			 * If it turns out to be desirable to take a
1161 			 * more active role in maintaining the connection
1162 			 * we could add a client callback to send
1163 			 * a "keepalive" kind of message (no doubt a nop)
1164 			 * and fire that on a shorter timer.
1165 			 */
1166 			if (TICK_TO_SEC(idle_time) >
1167 			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
1168 				mutex_exit(&ic->ic_state_mutex);
1169 				mutex_exit(&idm.idm_global_mutex);
1170 				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
1171 				    "conn %p idle for %d seconds, "
1172 				    "sending CE_TRANSPORT_FAIL",
1173 				    (void *)ic, (int)TICK_TO_SEC(idle_time));
1174 				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
1175 				mutex_enter(&idm.idm_global_mutex);
1176 				mutex_enter(&ic->ic_state_mutex);
1177 			}
1178 
1179 			idm_conn_rele(ic);
1180 
1181 			mutex_exit(&ic->ic_state_mutex);
1182 		}
1183 
1184 		(void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
1185 		    wake_time, TR_CLOCK_TICK);
1186 	}
1187 	mutex_exit(&idm.idm_global_mutex);
1188 
1189 	thread_exit();
1190 }
1191