xref: /titanic_44/usr/src/uts/common/io/idm/idm_impl.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/sdt.h>

#include <sys/socket.h>
#include <sys/strsubr.h>
#include <sys/socketvar.h>
#include <sys/sysmacros.h>

#include <sys/idm/idm.h>
#include <sys/idm/idm_so.h>
#include <hd_crc.h>

extern idm_transport_t  idm_transport_list[];
/*
 * iscsi_crc32_hd tracks whether hardware-assisted CRC32c is available:
 * -1     - uninitialized (checked on first use)
 * 0      - hardware crc32c is available and will be used
 * others - hardware crc32c is not available; use the software table below
 */
static int iscsi_crc32_hd = -1;

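/*
 * idm_pdu_rx() -- inbound PDU dispatch.  In full-feature phase, SCSI-related
 * PDUs are forwarded straight to the client/transport handlers via
 * idm_pdu_rx_forward_ffp(); everything else is routed through the
 * connection state machine as an RX event so that PDUs arriving in the
 * wrong state can be rejected or dropped.
 */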
void
idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
{
	iscsi_async_evt_hdr_t *async_evt;

	/*
	 * If we are in full-featured mode then route SCSI-related
	 * commands to the appropriate function vector
	 */
	ic->ic_timestamp = ddi_get_lbolt();
	mutex_enter(&ic->ic_state_mutex);
	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
		mutex_exit(&ic->ic_state_mutex);

		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
			/* Forwarded SCSI-related commands */
			return;
		}
		mutex_enter(&ic->ic_state_mutex);
	}

	/*
	 * If we get here with a SCSI-related PDU then we are not in
	 * full-feature mode and the PDU is a protocol error (SCSI command
	 * PDU's may sometimes be an exception, see below).  All
	 * non-SCSI PDU's are treated the same regardless of whether
	 * we are in full-feature mode.
	 *
	 * Look at the opcode and in some cases the PDU status and
	 * determine the appropriate event to send to the connection
	 * state machine.  Generate the event, passing the PDU as data.
	 * If the current connection state allows reception of the event
	 * the PDU will be submitted to the IDM client for processing,
	 * otherwise the PDU will be dropped.
	 */
	switch (IDM_PDU_OPCODE(pdu)) {
	case ISCSI_OP_LOGIN_CMD:
		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
		break;
	case ISCSI_OP_LOGIN_RSP:
		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_LOGOUT_CMD:
		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
		    iscsi_logout_hdr_t *,
		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_ASYNC_EVENT:
		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
		switch (async_evt->async_event) {
		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
		default:
			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
			    (uintptr_t)pdu);
			break;
		}
		break;
	case ISCSI_OP_SCSI_CMD:
		/*
		 * Consider this scenario:  We are a target connection
		 * in "in login" state and a "login success sent" event has
		 * been generated but not yet handled.  Since we've sent
		 * the login response but we haven't actually transitioned
		 * to FFP mode we might conceivably receive a SCSI command
		 * from the initiator before we are ready.  We are actually
		 * in FFP, we just don't know it yet -- to address this we
		 * can generate an event corresponding to the SCSI command.
		 * At the point when the event is handled by the state
		 * machine, the login request will have been handled and we
		 * should be in FFP.  If we are not in FFP by that time
		 * we can reject the SCSI command with a protocol error.
		 *
		 * This scenario only applies to the target.
		 *
		 * Handle the dtrace probe in iscsit so we can find all the
		 * pieces of the CDB.
		 */
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_DATA:
		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
		    iscsi_data_hdr_t *,
		    (iscsi_data_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_TASK_MGT_MSG:
		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
		    iscsi_scsi_task_mgt_hdr_t *,
		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_NOOP_OUT:
		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
		    iscsi_nop_out_hdr_t *,
		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_TEXT_CMD:
		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
		    iscsi_text_hdr_t *,
		    (iscsi_text_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	/* Initiator PDU's */
	case ISCSI_OP_SCSI_DATA_RSP:
	case ISCSI_OP_RTT_RSP:
	case ISCSI_OP_SNACK_CMD:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT_MSG:
	case ISCSI_OP_SCSI_TASK_MGT_RSP:
		/* Validate received PDU against current state */
		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
		    (uintptr_t)pdu);
		break;
	}
	mutex_exit(&ic->ic_state_mutex);
}

void
idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
{
	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
}

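/*
 * idm_pdu_rx_forward_ffp() -- fast-path dispatch for PDUs that are legal in
 * full-feature phase.  Returns B_TRUE if the PDU was handed to a client or
 * transport handler, B_FALSE if the opcode is not an FFP opcode and must be
 * handled through the slow path.
 */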
boolean_t
idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
{
	/*
	 * If this is an FFP request, call the appropriate handler
	 * and return B_TRUE, otherwise return B_FALSE.
	 */
	switch (IDM_PDU_OPCODE(pdu)) {
	case ISCSI_OP_SCSI_CMD:
		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_DATA:
		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
		    iscsi_data_hdr_t *,
		    (iscsi_data_hdr_t *)pdu->isp_hdr);
		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_TASK_MGT_MSG:
		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
		    iscsi_scsi_task_mgt_hdr_t *,
		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_NOOP_OUT:
		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
		    iscsi_nop_out_hdr_t *,
		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_TEXT_CMD:
		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
		    iscsi_text_hdr_t *,
		    (iscsi_text_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
		/* Initiator only */
	case ISCSI_OP_SCSI_RSP:
		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_DATA_RSP:
		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_RTT_RSP:
		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_TASK_MGT_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_NOOP_IN:
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
	/*NOTREACHED*/
}

void
idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
{
	/*
	 * Some PDU's specific to FFP get special handling.  This function
	 * will normally never be called in FFP with an FFP PDU since this
	 * is a slow path, but it can happen on the target side during
	 * the transition to FFP.  We primarily call
	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
	 */
	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
		/*
		 * Non-FFP PDU, use generic RX handler
		 */
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
	}
}

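/*
 * idm_parse_login_rsp() -- classify a login response PDU.  A successful
 * final response that transits to full-feature phase generates a
 * login-success event; other successful (intermediate) responses are
 * treated as generic PDU events, and a non-zero status class generates a
 * login-failure event.
 */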
void
idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
{
	iscsi_login_rsp_hdr_t	*login_rsp =
	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
	idm_conn_event_t	new_event;

	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
		    ISCSI_FULL_FEATURE_PHASE)) {
			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
			    CE_LOGIN_SUCCESS_SND);
		} else {
			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
		}
	} else {
		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
	}
}

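/*
 * idm_parse_logout_req() -- classify a logout request PDU.  The logout
 * reason code and, for close-connection/recovery logouts, the CID in the
 * request determine whether this connection, another connection, or the
 * whole session is being logged out.
 */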
void
idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
{
	iscsi_logout_hdr_t	*logout_req =
	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
	idm_conn_event_t	new_event;
	uint8_t			reason =
	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);

	/*
	 *	For a normal logout (close connection or close session) IDM
	 *	will terminate processing of all tasks, completing the tasks
	 *	back to the client with a status indicating the connection
	 *	was logged out.  These tasks are not retained for recovery.
	 *
	 *	For a "close connection for recovery" logout IDM suspends
	 *	processing of all tasks and completes them back to the client
	 *	with a status indicating the connection was logged out for
	 *	recovery.  Both initiator and target hang onto these tasks.
	 *	When we add ERL2 support IDM will need to provide mechanisms
	 *	to change the task and buffer associations to a new connection.
	 *
	 *	This code doesn't address the possibility of MC/S.  We'll
	 *	need to decide how the separate connections get handled
	 *	in that case.  One simple option is to make the client
	 *	generate the events for the other connections.
	 */
	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
		new_event =
		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
		/* Check logout CID against this connection's CID */
		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
			/* Logout is for this connection */
			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
			    CE_LOGOUT_THIS_CONN_SND);
		} else {
			/*
			 * Logout affects another connection.  This is not
			 * a relevant event for this connection so we'll
			 * just treat it as a normal PDU event.  Client
			 * will need to lookup the other connection and
			 * generate the event.
			 */
			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
		}
	} else {
		/* Invalid reason code */
		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
	}
}

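/*
 * idm_parse_logout_rsp() -- classify a logout response PDU as either a
 * logout-success or a logout-failure event, in the RX or TX direction as
 * appropriate.
 */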
void
idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
{
	idm_conn_event_t	new_event;
	iscsi_logout_rsp_hdr_t *logout_rsp =
	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;

	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
	} else {
		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
	}
}

/*
 * idm_svc_conn_create()
 * Transport-agnostic service connection creation, invoked from the transport
 * layer.
 */
idm_status_t
idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
    idm_conn_t **ic_result)
{
	idm_conn_t	*ic;
	idm_status_t	rc;

	/*
	 * Skip some work if we can already tell we are going offline;
	 * otherwise we would just end up destroying this connection later
	 * as part of shutting down the svc.
	 */
	mutex_enter(&is->is_mutex);
	if (!is->is_online) {
		mutex_exit(&is->is_mutex);
		return (IDM_STATUS_FAIL);
	}
	mutex_exit(&is->is_mutex);

	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
	    &is->is_svc_req.sr_conn_ops);
	ic->ic_svc_binding = is;

	/*
	 * Prepare connection state machine
	 */
	if ((rc = idm_conn_sm_init(ic)) != 0) {
		idm_conn_destroy_common(ic);
		return (rc);
	}

	*ic_result = ic;

	mutex_enter(&idm.idm_global_mutex);
	list_insert_tail(&idm.idm_tgt_conn_list, ic);
	idm.idm_tgt_conn_count++;
	mutex_exit(&idm.idm_global_mutex);

	return (IDM_STATUS_SUCCESS);
}

void
idm_svc_conn_destroy(idm_conn_t *ic)
{
	mutex_enter(&idm.idm_global_mutex);
	list_remove(&idm.idm_tgt_conn_list, ic);
	idm.idm_tgt_conn_count--;
	mutex_exit(&idm.idm_global_mutex);

	if (ic->ic_transport_private != NULL) {
		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
	}
	idm_conn_destroy_common(ic);
}

/*
 * idm_conn_create_common()
 *
 * Allocate and initialize IDM connection context
 */
idm_conn_t *
idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
    idm_conn_ops_t *conn_ops)
{
	idm_conn_t		*ic;
	idm_transport_t		*it;
	idm_transport_type_t	type;

	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];

		if ((it->it_ops != NULL) && (it->it_type == tt))
			break;
	}
	ASSERT(it->it_type == tt);
	if (it->it_type != tt)
		return (NULL);

	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);

	/* Initialize data */
	ic->ic_target_name[0] = '\0';
	ic->ic_initiator_name[0] = '\0';
	ic->ic_isid[0] = '\0';
	ic->ic_tsih[0] = '\0';
	ic->ic_conn_type = conn_type;
	ic->ic_conn_ops = *conn_ops;
	ic->ic_transport_ops = it->it_ops;
	ic->ic_transport_type = tt;
	ic->ic_transport_private = NULL; /* Set by transport service */
	ic->ic_internal_cid = idm_cid_alloc();
	if (ic->ic_internal_cid == 0) {
		kmem_free(ic, sizeof (idm_conn_t));
		return (NULL);
	}
	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
	idm_refcnt_init(&ic->ic_refcnt, ic);

	return (ic);
}

void
idm_conn_destroy_common(idm_conn_t *ic)
{
	idm_conn_sm_fini(ic);
	idm_refcnt_destroy(&ic->ic_refcnt);
	cv_destroy(&ic->ic_cv);
	mutex_destroy(&ic->ic_mutex);
	idm_cid_free(ic->ic_internal_cid);

	kmem_free(ic, sizeof (idm_conn_t));
}

/*
 * Invoked from the SM as a result of client's invocation of
 * idm_ini_conn_connect()
 */
idm_status_t
idm_ini_conn_finish(idm_conn_t *ic)
{
	/* Invoke the transport-specific connect routine */
	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
}

idm_status_t
idm_tgt_conn_finish(idm_conn_t *ic)
{
	idm_status_t rc;

	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
	if (rc != IDM_STATUS_SUCCESS) {
		return (IDM_STATUS_REJECT);
	}

	/* Target client is ready to receive a login, start connection */
	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
}

idm_transport_t *
idm_transport_lookup(idm_conn_req_t *cr)
{
	idm_transport_type_t	type;
	idm_transport_t		*it;
	idm_transport_caps_t	caps;

	/*
	 * Make sure all available transports are setup.  We call this now
	 * instead of at initialization time in case IB has become available
	 * since we started (hotplug, etc).
	 */
	idm_transport_setup(cr->cr_li);

	/* Determine the transport for this connection */
	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];

		if (it->it_ops == NULL) {
			/* transport is not registered */
			continue;
		}

		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
			return (it);
		}
	}

	ASSERT(0);
	return (NULL); /* Make gcc happy */
}

void
idm_transport_setup(ldi_ident_t li)
{
	idm_transport_type_t	type;
	idm_transport_t		*it;
	int			rc;

	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];
		/*
		 * We may want to store the LDI handle in the idm_svc_t
		 * and then allow multiple calls to ldi_open_by_name.  This
		 * would enable the LDI code to track who has the device open
		 * which could be useful in the case where we have multiple
		 * services and perhaps also have initiator and target opening
		 * the transport simultaneously.  For now we stick with the
		 * plan.
		 */
		if (it->it_ops == NULL) {
			/* transport is not ready, try to initialize it */
			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
				idm_so_init(it);
			} else {
				rc = ldi_open_by_name(it->it_device_path,
				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
				/*
				 * If the open is successful we will have
				 * filled in the LDI handle in the transport
				 * table and we expect that the transport
				 * registered itself.
				 */
				if (rc != 0) {
					it->it_ldi_hdl = NULL;
				}
			}
		}
	}
}

void
idm_transport_teardown()
{
	idm_transport_type_t	type;
	idm_transport_t		*it;

	ASSERT(mutex_owned(&idm.idm_global_mutex));

	/* Caller holds the IDM global mutex */
	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];
		/* If we have an open LDI handle on this driver, close it */
		if (it->it_ldi_hdl != NULL) {
			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
			it->it_ldi_hdl = NULL;
		}
	}
}

/*
 * ID pool code.  We use this to generate unique structure identifiers without
 * searching the existing structures.  This avoids the need to lock entire
 * sets of structures at inopportune times.  Adapted from the CIFS server code.
 *
 *    A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
 *    A bit set to '1' indicates that that particular value has been allocated.
 *    The allocation process is done by shifting a bit through the whole
 *    bitmap.  The current position of that index bit is kept in the
 *    idm_idpool_t structure and represented by a byte index (0 to buffer size
 *    minus 1) and a bit index (0 to 7).
 *
 *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
 *    out of IDs its current size is doubled until it reaches its maximum size
 *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out, which
 *    means that a pool can have a maximum number of 65534 IDs available.
 */

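/*
 * Hypothetical usage sketch (for illustration only; not called anywhere in
 * this file):
 *
 *	idm_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) idm_idpool_create(&pool);
 *	if (idm_idpool_alloc(&pool, &id) == 0) {
 *		... use id (always in the range 1 - 65534) ...
 *		idm_idpool_free(&pool, id);
 *	}
 *	idm_idpool_destroy(&pool);
 */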
static int
idm_idpool_increment(
    idm_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * idm_idpool_create
 *
 * This function initializes the pool structure provided.
 */

int
idm_idpool_create(idm_idpool_t *pool)
{

	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);

	pool->id_size = IDM_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = IDM_IDPOOL_MAGIC;
	return (0);
}

/*
 * idm_idpool_destroy
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */

void
idm_idpool_destroy(idm_idpool_t *pool)
{
	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}

/*
 * idm_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * idm_idpool_free
 *
 * This function frees the ID provided.
 */
void
idm_idpool_free(idm_idpool_t *pool, uint16_t id)
{
	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}

uint32_t
idm_cid_alloc(void)
{
	/*
	 * ID pool works with 16-bit identifiers right now.  That should
	 * be plenty since we will probably never have more than 2^16
	 * connections simultaneously.
	 */
	uint16_t cid16;

	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
		return (0); /* Fail */
	}

	return ((uint32_t)cid16);
}

void
idm_cid_free(uint32_t cid)
{
	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
}

/*
 * Code for generating the header and data digests
 *
 * This is the CRC-32C table
 * Generated with:
 * width = 32 bits
 * poly = 0x1EDC6F41
 * reflect input bytes = true
 * reflect output bytes = true
 */

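/*
 * CRC-32C (the Castagnoli polynomial above) is the digest algorithm iSCSI
 * defines for HeaderDigest and DataDigest.  The table is indexed by the low
 * byte of (crc ^ next input byte) for the reflected, byte-at-a-time
 * algorithm used below.
 */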
uint32_t idm_crc32c_table[256] =
{
	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
};

/*
 * idm_crc32c - Steps through the buffer one byte at a time, calculates
 * reflected crc using table.
 */
uint32_t
idm_crc32c(void *address, unsigned long length)
{
	uint8_t *buffer = address;
	uint32_t crc = 0xffffffff, result;
#ifdef _BIG_ENDIAN
	uint8_t byte0, byte1, byte2, byte3;
#endif

	ASSERT(address != NULL);

	if (iscsi_crc32_hd == -1) {
		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
			iscsi_crc32_hd = 0;
		} else {
			iscsi_crc32_hd = 1;
		}
	}
	if (iscsi_crc32_hd == 0)
		return (HW_CRC32(buffer, length, crc));

	while (length--) {
		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
		    (crc >> 8);
	}
	result = crc ^ 0xffffffff;

#ifdef	_BIG_ENDIAN
	byte0 = (uint8_t)(result & 0xFF);
	byte1 = (uint8_t)((result >> 8) & 0xFF);
	byte2 = (uint8_t)((result >> 16) & 0xFF);
	byte3 = (uint8_t)((result >> 24) & 0xFF);
	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
#endif	/* _BIG_ENDIAN */

	return (result);
}

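/*
 * Hypothetical usage sketch (for illustration only): a header digest over a
 * 48-byte basic header segment could be computed as
 *
 *	uint32_t hdigest = idm_crc32c(pdu->isp_hdr, sizeof (iscsi_hdr_t));
 *
 * and a digest over data that arrives in pieces by seeding the CRC with
 * idm_crc32c() on the first chunk and chaining idm_crc32c_continued() over
 * the rest.
 */
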
/*
 * idm_crc32c_continued - Continues stepping through the buffer one
 * byte at a time, calculates reflected crc using table.
 */
uint32_t
idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
{
	uint8_t *buffer = address;
	uint32_t result;
#ifdef	_BIG_ENDIAN
	uint8_t byte0, byte1, byte2, byte3;
#endif

	ASSERT(address != NULL);

	if (iscsi_crc32_hd == -1) {
		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
			iscsi_crc32_hd = 0;
		} else {
			iscsi_crc32_hd = 1;
		}
	}
	if (iscsi_crc32_hd == 0)
		return (HW_CRC32_CONT(buffer, length, crc));

#ifdef	_BIG_ENDIAN
	byte0 = (uint8_t)((crc >> 24) & 0xFF);
	byte1 = (uint8_t)((crc >> 16) & 0xFF);
	byte2 = (uint8_t)((crc >> 8) & 0xFF);
	byte3 = (uint8_t)(crc & 0xFF);
	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
#endif

	crc = crc ^ 0xffffffff;
	while (length--) {
		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
		    (crc >> 8);
	}
	result = crc ^ 0xffffffff;

#ifdef	_BIG_ENDIAN
	byte0 = (uint8_t)(result & 0xFF);
	byte1 = (uint8_t)((result >> 8) & 0xFF);
	byte2 = (uint8_t)((result >> 16) & 0xFF);
	byte3 = (uint8_t)((result >> 24) & 0xFF);
	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
#endif
	return (result);
}

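/*
 * kmem cache constructor/destructor for idm_task_t.  The constructor claims
 * a free slot in the global idm.idm_taskid_table, starting the circular
 * search at idm_taskid_next; the slot index becomes the task's IDM task tag
 * (idt_tt).  The destructor releases the slot and tears the task back down.
 */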
/* ARGSUSED */
int
idm_task_constructor(void *hdl, void *arg, int flags)
{
	idm_task_t *idt = (idm_task_t *)hdl;
	uint32_t next_task;

	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);

	/* Find the next free task ID */
	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
	next_task = idm.idm_taskid_next;
	while (idm.idm_taskid_table[next_task]) {
		next_task++;
		if (next_task == idm.idm_taskid_max)
			next_task = 0;
		if (next_task == idm.idm_taskid_next) {
			rw_exit(&idm.idm_taskid_table_lock);
			return (-1);
		}
	}

	idm.idm_taskid_table[next_task] = idt;
	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
	rw_exit(&idm.idm_taskid_table_lock);

	idt->idt_tt = next_task;

	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));
	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));
	idm_refcnt_init(&idt->idt_refcnt, idt);

	/*
	 * Set the transport header pointer explicitly.  This removes the
	 * need for per-transport header allocation, which simplifies cache
	 * init considerably.  If at a later date we have an additional IDM
	 * transport that requires a different size, we'll revisit this.
	 */
	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
	idt->idt_flags = 0;
	return (0);
}

/* ARGSUSED */
void
idm_task_destructor(void *hdl, void *arg)
{
	idm_task_t *idt = (idm_task_t *)hdl;

	/* Remove the task from the ID table */
	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
	idm.idm_taskid_table[idt->idt_tt] = NULL;
	rw_exit(&idm.idm_taskid_table_lock);

	/* free the inbuf and outbuf */
	idm_refcnt_destroy(&idt->idt_refcnt);
	list_destroy(&idt->idt_inbufv);
	list_destroy(&idt->idt_outbufv);

	/*
	 * The final call to idm_task_rele may happen with the task
	 * mutex held which may invoke this destructor immediately.
	 * Stall here until the task mutex owner lets go.
	 */
	mutex_enter(&idt->idt_mutex);
	mutex_destroy(&idt->idt_mutex);
}

/*
 * idm_listbuf_insert searches from the back of the list looking for the
 * insertion point.
 */
void
idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
{
	idm_buf_t	*idb;

	/* iterate through the list to find the insertion point */
	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {

		if (idb->idb_bufoffset < buf->idb_bufoffset) {

			list_insert_after(lst, idb, buf);
			return;
		}
	}

	/* add the buf to the head of the list */
	list_insert_head(lst, buf);

}

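/*
 * idm_wd_thread() -- connection watchdog.  Once every IDM_WD_INTERVAL
 * seconds this thread walks the target connection list; for each connection
 * in FFP it fires the client's keepalive callback once the connection has
 * been idle for IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT seconds and generates
 * CE_TRANSPORT_FAIL once it has been idle for
 * IDM_TRANSPORT_FAIL_IDLE_TIMEOUT seconds.
 */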
/*ARGSUSED*/
void
idm_wd_thread(void *arg)
{
	idm_conn_t	*ic;
	clock_t		wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
	clock_t		idle_time;

	/* Record the thread id for thread_join() */
	idm.idm_wd_thread_did = curthread->t_did;
	mutex_enter(&idm.idm_global_mutex);
	idm.idm_wd_thread_running = B_TRUE;
	cv_signal(&idm.idm_wd_cv);

	while (idm.idm_wd_thread_running) {
		for (ic = list_head(&idm.idm_tgt_conn_list);
		    ic != NULL;
		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
			idle_time = ddi_get_lbolt() - ic->ic_timestamp;

			/*
			 * If this connection is in FFP then grab a hold
			 * and check the various timeout thresholds.  Otherwise
			 * the connection is closing and we should just
			 * move on to the next one.
			 */
			mutex_enter(&ic->ic_state_mutex);
			if (ic->ic_ffp) {
				idm_conn_hold(ic);
			} else {
				mutex_exit(&ic->ic_state_mutex);
				continue;
			}

			/*
			 * If there hasn't been any activity on this
			 * connection for the keepalive timeout period
			 * and if the client has provided a keepalive
			 * callback then call the keepalive callback.
			 * This allows the client to take action to keep
			 * the link alive (like send a nop PDU).
			 */
			if ((TICK_TO_SEC(idle_time) >=
			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
			    !ic->ic_keepalive) {
				ic->ic_keepalive = B_TRUE;
				if (ic->ic_conn_ops.icb_keepalive) {
					mutex_exit(&ic->ic_state_mutex);
					mutex_exit(&idm.idm_global_mutex);
					(*ic->ic_conn_ops.icb_keepalive)(ic);
					mutex_enter(&idm.idm_global_mutex);
					mutex_enter(&ic->ic_state_mutex);
				}
			} else if ((TICK_TO_SEC(idle_time) <
			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
				/* Reset keepalive */
				ic->ic_keepalive = B_FALSE;
			}

			/*
			 * If there hasn't been any activity on this
			 * connection for the failure timeout period then
			 * drop the connection.  We expect the initiator
			 * to keep the connection alive if it wants the
			 * connection to stay open.
			 *
			 * If it turns out to be desirable to take a
			 * more active role in maintaining the connection
			 * we could add a client callback to send
			 * a "keepalive" kind of message (no doubt a nop)
			 * and fire that on a shorter timer.
			 */
			if (TICK_TO_SEC(idle_time) >
			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
				mutex_exit(&ic->ic_state_mutex);
				mutex_exit(&idm.idm_global_mutex);
				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
				    "conn %p idle for %d seconds, "
				    "sending CE_TRANSPORT_FAIL",
				    (void *)ic, (int)TICK_TO_SEC(idle_time));
				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
				mutex_enter(&idm.idm_global_mutex);
				mutex_enter(&ic->ic_state_mutex);
			}

			idm_conn_rele(ic);

			mutex_exit(&ic->ic_state_mutex);
		}

		(void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
		    wake_time, TR_CLOCK_TICK);
	}
	mutex_exit(&idm.idm_global_mutex);

	thread_exit();
}