/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/sdt.h>

#include <sys/socket.h>
#include <sys/strsubr.h>
#include <sys/socketvar.h>
#include <sys/sysmacros.h>

#include <sys/idm/idm.h>
#include <sys/idm/idm_so.h>
#include <hd_crc.h>

extern idm_transport_t idm_transport_list[];
/*
 * -1     - uninitialized
 * 0      - applicable
 * others - NA
 */
static int iscsi_crc32_hd = -1;

void
idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
{
	iscsi_async_evt_hdr_t *async_evt;

	/*
	 * If we are in full-featured mode then route SCSI-related
	 * commands to the appropriate function vector
	 */
	ic->ic_timestamp = ddi_get_lbolt();
	mutex_enter(&ic->ic_state_mutex);
	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
		mutex_exit(&ic->ic_state_mutex);

		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
			/* Forwarded SCSI-related commands */
			return;
		}
		mutex_enter(&ic->ic_state_mutex);
	}

	/*
	 * If we get here with a SCSI-related PDU then we are not in
	 * full-feature mode and the PDU is a protocol error (SCSI command
	 * PDUs may sometimes be an exception, see below).  All
	 * non-SCSI PDUs are treated the same regardless of whether
	 * we are in full-feature mode.
	 *
	 * Look at the opcode and in some cases the PDU status and
	 * determine the appropriate event to send to the connection
	 * state machine.  Generate the event, passing the PDU as data.
	 * If the current connection state allows reception of the event
	 * the PDU will be submitted to the IDM client for processing,
	 * otherwise the PDU will be dropped.
	 */
	switch (IDM_PDU_OPCODE(pdu)) {
	case ISCSI_OP_LOGIN_CMD:
		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
		break;
	case ISCSI_OP_LOGIN_RSP:
		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_LOGOUT_CMD:
		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
		    iscsi_logout_hdr_t *,
		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
		break;
	case ISCSI_OP_ASYNC_EVENT:
		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
		switch (async_evt->async_event) {
		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
		default:
			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
			    (uintptr_t)pdu);
			break;
		}
		break;
	case ISCSI_OP_SCSI_CMD:
		/*
		 * Consider this scenario:  We are a target connection
		 * in "in login" state and a "login success sent" event has
		 * been generated but not yet handled.  Since we've sent
		 * the login response but we haven't actually transitioned
		 * to FFP mode we might conceivably receive a SCSI command
		 * from the initiator before we are ready.  We are actually
		 * in FFP; we just don't know it yet -- to address this we
		 * can generate an event corresponding to the SCSI command.
		 * At the point when the event is handled by the state
		 * machine the login request will have been handled and we
		 * should be in FFP.  If we are not in FFP by that time
		 * we can reject the SCSI command with a protocol error.
		 *
		 * This scenario only applies to the target.
		 *
		 * Handle dtrace probe in iscsit so we can find all the
		 * pieces of the CDB
		 */
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_DATA:
		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
		    iscsi_data_hdr_t *,
		    (iscsi_data_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_TASK_MGT_MSG:
		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
		    iscsi_scsi_task_mgt_hdr_t *,
		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_NOOP_OUT:
		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
		    iscsi_nop_out_hdr_t *,
		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_TEXT_CMD:
		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
		    iscsi_text_hdr_t *,
		    (iscsi_text_hdr_t *)pdu->isp_hdr);
		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
		break;
	/* Initiator PDUs */
	case ISCSI_OP_SCSI_DATA_RSP:
	case ISCSI_OP_RTT_RSP:
	case ISCSI_OP_SNACK_CMD:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT_MSG:
	case ISCSI_OP_SCSI_TASK_MGT_RSP:
		/* Validate received PDU against current state */
		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
		    (uintptr_t)pdu);
		break;
	}
	mutex_exit(&ic->ic_state_mutex);
}

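/*
 * idm_pdu_tx_forward()
 *
 * Hand a fully built PDU to the transport (it_tx_pdu) for transmission
 * on this connection.
 */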
void
idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
{
	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
}

boolean_t
idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
{
	/*
	 * If this is an FFP request, call the appropriate handler
	 * and return B_TRUE, otherwise return B_FALSE.
	 */
	switch (IDM_PDU_OPCODE(pdu)) {
	case ISCSI_OP_SCSI_CMD:
		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_DATA:
		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
		    iscsi_data_hdr_t *,
		    (iscsi_data_hdr_t *)pdu->isp_hdr);
		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_TASK_MGT_MSG:
		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
		    iscsi_scsi_task_mgt_hdr_t *,
		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_NOOP_OUT:
		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
		    iscsi_nop_out_hdr_t *,
		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_TEXT_CMD:
		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
		    iscsi_text_hdr_t *,
		    (iscsi_text_hdr_t *)pdu->isp_hdr);
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	/* Initiator only */
	case ISCSI_OP_SCSI_RSP:
		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_DATA_RSP:
		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_RTT_RSP:
		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
		return (B_TRUE);
	case ISCSI_OP_SCSI_TASK_MGT_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_NOOP_IN:
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
	/*NOTREACHED*/
}

void
idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
{
	/*
	 * Some PDUs specific to FFP get special handling.  This function
	 * will normally never be called in FFP with an FFP PDU since this
	 * is a slow path, but it can happen on the target side during
	 * the transition to FFP.  We primarily call
	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
	 */
	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
		/*
		 * Non-FFP PDU, use generic RX handler
		 */
		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
	}
}

void
idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
{
	iscsi_login_rsp_hdr_t *login_rsp =
	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
	idm_conn_event_t new_event;

	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
		    ISCSI_FULL_FEATURE_PHASE)) {
			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
			    CE_LOGIN_SUCCESS_SND);
		} else {
			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
		}
	} else {
		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
	}
}

void
idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
{
	iscsi_logout_hdr_t *logout_req =
	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
	idm_conn_event_t new_event;
	uint8_t reason =
	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);

	/*
	 * For a normal logout (close connection or close session) IDM
	 * will terminate processing of all tasks, completing the tasks
	 * back to the client with a status indicating the connection
	 * was logged out.  These tasks do not get completed.
	 *
	 * For a "close connection for recovery" logout IDM suspends
	 * processing of all tasks and completes them back to the client
	 * with a status indicating the connection was logged out for
	 * recovery.  Both initiator and target hang onto these tasks.
	 * When we add ERL2 support IDM will need to provide mechanisms
	 * to change the task and buffer associations to a new connection.
	 *
	 * This code doesn't address the possibility of MC/S.  We'll
	 * need to decide how the separate connections get handled
	 * in that case.  One simple option is to make the client
	 * generate the events for the other connections.
	 */
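	/*
	 * In summary, the reason-code handling below maps (RX shown; the
	 * TX cases use the corresponding _SND/_TX events):
	 *
	 *	close the session	-> CE_LOGOUT_SESSION_RCV
	 *	close the connection,	-> CE_LOGOUT_THIS_CONN_RCV if the CID
	 *	  or close for recovery	   matches this connection, otherwise
	 *				   CE_MISC_RX
	 *	any other reason code	-> CE_RX_PROTOCOL_ERROR
	 */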
	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
		new_event =
		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
		/* Check logout CID against this connection's CID */
		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
			/* Logout is for this connection */
			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
			    CE_LOGOUT_THIS_CONN_SND);
		} else {
			/*
			 * Logout affects another connection.  This is not
			 * a relevant event for this connection so we'll
			 * just treat it as a normal PDU event.  Client
			 * will need to look up the other connection and
			 * generate the event.
			 */
			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
		}
	} else {
		/* Invalid reason code */
		new_event = (rx ?
		    CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
	}
}

void
idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
{
	idm_conn_event_t new_event;
	iscsi_logout_rsp_hdr_t *logout_rsp =
	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;

	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
	} else {
		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
	}

	if (rx) {
		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
	} else {
		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
	}
}

/*
 * idm_svc_conn_create()
 * Transport-agnostic service connection creation, invoked from the transport
 * layer.
 */
idm_status_t
idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
    idm_conn_t **ic_result)
{
	idm_conn_t *ic;
	idm_status_t rc;

	/*
	 * Skip some work if we can already tell we are going offline.
	 * Otherwise we will destroy this connection later as part of
	 * shutting down the svc.
	 */
	mutex_enter(&is->is_mutex);
	if (!is->is_online) {
		mutex_exit(&is->is_mutex);
		return (IDM_STATUS_FAIL);
	}
	mutex_exit(&is->is_mutex);

	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
	    &is->is_svc_req.sr_conn_ops);
	ic->ic_svc_binding = is;

	/*
	 * Prepare connection state machine
	 */
	if ((rc = idm_conn_sm_init(ic)) != 0) {
		idm_conn_destroy_common(ic);
		return (rc);
	}

	*ic_result = ic;

	mutex_enter(&idm.idm_global_mutex);
	list_insert_tail(&idm.idm_tgt_conn_list, ic);
	idm.idm_tgt_conn_count++;
	mutex_exit(&idm.idm_global_mutex);

	return (IDM_STATUS_SUCCESS);
}

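/*
 * idm_svc_conn_destroy()
 * Tear down a service connection created by idm_svc_conn_create():
 * remove it from the global target connection list, give the transport
 * a chance to release its private state, then free the common state.
 */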
void
idm_svc_conn_destroy(idm_conn_t *ic)
{
	mutex_enter(&idm.idm_global_mutex);
	list_remove(&idm.idm_tgt_conn_list, ic);
	idm.idm_tgt_conn_count--;
	mutex_exit(&idm.idm_global_mutex);

	if (ic->ic_transport_private != NULL) {
		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
	}
	idm_conn_destroy_common(ic);
}

/*
 * idm_conn_create_common()
 *
 * Allocate and initialize IDM connection context
 */
idm_conn_t *
idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
    idm_conn_ops_t *conn_ops)
{
	idm_conn_t *ic;
	idm_transport_t *it;
	idm_transport_type_t type;

	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];

		if ((it->it_ops != NULL) && (it->it_type == tt))
			break;
	}
	ASSERT(it->it_type == tt);
	if (it->it_type != tt)
		return (NULL);

	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);

	/* Initialize data */
	ic->ic_target_name[0] = '\0';
	ic->ic_initiator_name[0] = '\0';
	ic->ic_isid[0] = '\0';
	ic->ic_tsih[0] = '\0';
	ic->ic_conn_type = conn_type;
	ic->ic_conn_ops = *conn_ops;
	ic->ic_transport_ops = it->it_ops;
	ic->ic_transport_type = tt;
	ic->ic_transport_private = NULL; /* Set by transport service */
	ic->ic_internal_cid = idm_cid_alloc();
	if (ic->ic_internal_cid == 0) {
		kmem_free(ic, sizeof (idm_conn_t));
		return (NULL);
	}
	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
	idm_refcnt_init(&ic->ic_refcnt, ic);

	return (ic);
}

void
idm_conn_destroy_common(idm_conn_t *ic)
{
	idm_conn_sm_fini(ic);
	idm_refcnt_destroy(&ic->ic_refcnt);
	cv_destroy(&ic->ic_cv);
	mutex_destroy(&ic->ic_mutex);
	idm_cid_free(ic->ic_internal_cid);

	kmem_free(ic, sizeof (idm_conn_t));
}

/*
 * Invoked from the SM as a result of client's invocation of
 * idm_ini_conn_connect()
 */
idm_status_t
idm_ini_conn_finish(idm_conn_t *ic)
{
	/* invoke transport-specific connection */
	return (ic->ic_transport_ops->it_ini_conn_connect(ic));
}

idm_status_t
idm_tgt_conn_finish(idm_conn_t *ic)
{
	idm_status_t rc;

	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
	if (rc != IDM_STATUS_SUCCESS) {
		return (IDM_STATUS_REJECT);
	}

	/* Target client is ready to receive a login, start connection */
	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
}

idm_transport_t *
idm_transport_lookup(idm_conn_req_t *cr)
{
	idm_transport_type_t type;
	idm_transport_t *it;
	idm_transport_caps_t caps;

	/*
	 * Make sure all available transports are setup.  We call this now
	 * instead of at initialization time in case IB has become available
	 * since we started (hotplug, etc).
	 */
	idm_transport_setup(cr->cr_li, cr->cr_boot_conn);

	/* Determine the transport for this connection */
	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];

		if (it->it_ops == NULL) {
			/* transport is not registered */
			continue;
		}

		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
			return (it);
		}
	}

	ASSERT(0);
	return (NULL); /* Make gcc happy */
}

void
idm_transport_setup(ldi_ident_t li, boolean_t boot_conn)
{
	idm_transport_type_t type;
	idm_transport_t *it;
	int rc;

	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];
		/*
		 * We may want to store the LDI handle in the idm_svc_t
		 * and then allow multiple calls to ldi_open_by_name.  This
		 * would enable the LDI code to track who has the device open
		 * which could be useful in the case where we have multiple
		 * services and perhaps also have initiator and target opening
		 * the transport simultaneously.  For now we stick with the
		 * plan.
		 */
		if (it->it_ops == NULL) {
			/* transport is not ready, try to initialize it */
			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
				idm_so_init(it);
			} else {
				if (boot_conn == B_TRUE) {
					/*
					 * iSCSI boot doesn't need iSER.
					 * Opening iSER here may drive I/O
					 * to a failed session and cause a
					 * deadlock.
					 */
					continue;
				}
				rc = ldi_open_by_name(it->it_device_path,
				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
				/*
				 * If the open is successful we will have
				 * filled in the LDI handle in the transport
				 * table and we expect that the transport
				 * registered itself.
				 */
				if (rc != 0) {
					it->it_ldi_hdl = NULL;
				}
			}
		}
	}
}

void
idm_transport_teardown()
{
	idm_transport_type_t type;
	idm_transport_t *it;

	ASSERT(mutex_owned(&idm.idm_global_mutex));

	/* Caller holds the IDM global mutex */
	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
		it = &idm_transport_list[type];
		/* If we have an open LDI handle on this driver, close it */
		if (it->it_ldi_hdl != NULL) {
			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
			it->it_ldi_hdl = NULL;
		}
	}
}

/*
 * ID pool code.  We use this to generate unique structure identifiers without
 * searching the existing structures.  This avoids the need to lock entire
 * sets of structures at inopportune times.  Adapted from the CIFS server code.
 *
 * A pool of IDs is a pool of 16 bit numbers.  It is implemented as a bitmap.
 * A bit set to '1' indicates that that particular value has been allocated.
 * The allocation process is done by shifting a bit through the whole bitmap.
 * The current position of that index bit is kept in the idm_idpool_t
 * structure and represented by a byte index (0 to buffer size minus 1) and
 * a bit index (0 to 7).
 *
 * The pools start with a size of 8 bytes or 64 IDs.  Each time the pool runs
 * out of IDs its current size is doubled until it reaches its maximum size
 * (8192 bytes or 65536 IDs).  The IDs 0 and 65535 are never given out which
 * means that a pool can have a maximum number of 65534 IDs available.
 */
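
/*
 * A minimal usage sketch of the pool interface (illustration only; the
 * local pool and id variables shown here are hypothetical, not taken
 * from the code below):
 *
 *	idm_idpool_t	pool;
 *	uint16_t	id;
 *
 *	(void) idm_idpool_create(&pool);
 *	if (idm_idpool_alloc(&pool, &id) == 0) {
 *		-- id is in the range 1 .. 65534 --
 *		idm_idpool_free(&pool, id);
 *	}
 *	idm_idpool_destroy(&pool);
 */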

static int
idm_idpool_increment(idm_idpool_t *pool)
{
	uint8_t *new_pool;
	uint32_t new_size;

	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}

/*
 * idm_idpool_create
 *
 * This function initializes the pool structure provided.
 */
int
idm_idpool_create(idm_idpool_t *pool)
{
	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);

	pool->id_size = IDM_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;	/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = IDM_IDPOOL_MAGIC;
	return (0);
}

/*
 * idm_idpool_destroy
 *
 * This function tears down and frees the resources associated with the
 * pool provided.
 */
void
idm_idpool_destroy(idm_idpool_t *pool)
{
	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
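
/*
 * An allocated ID encodes the position of its bit in the bitmap:
 * id = byte_index * 8 + bit_index.  For example, the bit at byte
 * index 2, bit index 5 corresponds to ID 2 * 8 + 5 = 21.
 */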

/*
 * idm_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 */
int
idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
{
	uint32_t i;
	uint8_t bit;
	uint8_t bit_idx;
	uint8_t byte;

	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			pool->id_bit = bit;
			pool->id_bit_idx = bit_idx;
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached.  If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}

/*
 * idm_idpool_free
 *
 * This function frees the ID provided.
 */
void
idm_idpool_free(idm_idpool_t *pool, uint16_t id)
{
	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}

uint32_t
idm_cid_alloc(void)
{
	/*
	 * ID pool works with 16-bit identifiers right now.  That should
	 * be plenty since we will probably never have more than 2^16
	 * connections simultaneously.
	 */
	uint16_t cid16;

	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
		return (0); /* Fail */
	}

	return ((uint32_t)cid16);
}

void
idm_cid_free(uint32_t cid)
{
	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
}

/*
 * Code for generating the header and data digests
 *
 * This is the CRC-32C table
 * Generated with:
 * width = 32 bits
 * poly = 0x1EDC6F41
 * reflect input bytes = true
 * reflect output bytes = true
 */

uint32_t idm_crc32c_table[256] =
{
	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
};
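
/*
 * For reference, each entry of the table above can be regenerated with
 * the byte-reflected form of the polynomial (0x82F63B78); a sketch, not
 * part of the driver:
 *
 *	uint32_t c = i;			-- i = 0 .. 255 --
 *	for (k = 0; k < 8; k++)
 *		c = (c & 1) ? (c >> 1) ^ 0x82F63B78 : (c >> 1);
 *	idm_crc32c_table[i] = c;
 */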

/*
 * idm_crc32c - Steps through buffer one byte at a time, calculates
 * reflected crc using table.
 */
uint32_t
idm_crc32c(void *address, unsigned long length)
{
	uint8_t *buffer = address;
	uint32_t crc = 0xffffffff, result;
#ifdef _BIG_ENDIAN
	uint8_t byte0, byte1, byte2, byte3;
#endif

	ASSERT(address != NULL);

	if (iscsi_crc32_hd == -1) {
		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
			iscsi_crc32_hd = 0;
		} else {
			iscsi_crc32_hd = 1;
		}
	}
	if (iscsi_crc32_hd == 0)
		return (HW_CRC32(buffer, length, crc));

	while (length--) {
		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
		    (crc >> 8);
	}
	result = crc ^ 0xffffffff;

#ifdef _BIG_ENDIAN
	byte0 = (uint8_t)(result & 0xFF);
	byte1 = (uint8_t)((result >> 8) & 0xFF);
	byte2 = (uint8_t)((result >> 16) & 0xFF);
	byte3 = (uint8_t)((result >> 24) & 0xFF);
	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
#endif	/* _BIG_ENDIAN */

	return (result);
}

/*
 * idm_crc32c_continued - Continues stepping through buffer one
 * byte at a time, calculates reflected crc using table.
 */
uint32_t
idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
{
	uint8_t *buffer = address;
	uint32_t result;
#ifdef _BIG_ENDIAN
	uint8_t byte0, byte1, byte2, byte3;
#endif

	ASSERT(address != NULL);

	if (iscsi_crc32_hd == -1) {
		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
			iscsi_crc32_hd = 0;
		} else {
			iscsi_crc32_hd = 1;
		}
	}
	if (iscsi_crc32_hd == 0)
		return (HW_CRC32_CONT(buffer, length, crc));

#ifdef _BIG_ENDIAN
	byte0 = (uint8_t)((crc >> 24) & 0xFF);
	byte1 = (uint8_t)((crc >> 16) & 0xFF);
	byte2 = (uint8_t)((crc >> 8) & 0xFF);
	byte3 = (uint8_t)(crc & 0xFF);
	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
#endif

	crc = crc ^ 0xffffffff;
	while (length--) {
		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
		    (crc >> 8);
	}
	result = crc ^ 0xffffffff;

#ifdef _BIG_ENDIAN
	byte0 = (uint8_t)(result & 0xFF);
	byte1 = (uint8_t)((result >> 8) & 0xFF);
	byte2 = (uint8_t)((result >> 16) & 0xFF);
	byte3 = (uint8_t)((result >> 24) & 0xFF);
	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
#endif
	return (result);
}
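
/*
 * The two routines above are intended to compose: computing a digest in
 * pieces should give the same result as one pass over the whole buffer,
 * e.g. (sketch, with hypothetical buf, len and split point n):
 *
 *	idm_crc32c_continued(buf + n, len - n, idm_crc32c(buf, n))
 *		== idm_crc32c(buf, len)
 */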

/* ARGSUSED */
int
idm_task_constructor(void *hdl, void *arg, int flags)
{
	idm_task_t *idt = (idm_task_t *)hdl;
	uint32_t next_task;

	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);

	/* Find the next free task ID */
	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
	next_task = idm.idm_taskid_next;
	while (idm.idm_taskid_table[next_task]) {
		next_task++;
		if (next_task == idm.idm_taskid_max)
			next_task = 0;
		if (next_task == idm.idm_taskid_next) {
			rw_exit(&idm.idm_taskid_table_lock);
			return (-1);
		}
	}

	idm.idm_taskid_table[next_task] = idt;
	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
	rw_exit(&idm.idm_taskid_table_lock);

	idt->idt_tt = next_task;

	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));
	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));
	idm_refcnt_init(&idt->idt_refcnt, idt);

	/*
	 * Set the transport header pointer explicitly.  This removes the
	 * need for per-transport header allocation, which simplifies cache
	 * init considerably.  If at a later date we have an additional IDM
	 * transport that requires a different size, we'll revisit this.
	 */
	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
	idt->idt_flags = 0;
	return (0);
}

/* ARGSUSED */
void
idm_task_destructor(void *hdl, void *arg)
{
	idm_task_t *idt = (idm_task_t *)hdl;

	/* Remove the task from the ID table */
	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
	idm.idm_taskid_table[idt->idt_tt] = NULL;
	rw_exit(&idm.idm_taskid_table_lock);

	/* free the inbuf and outbuf */
	idm_refcnt_destroy(&idt->idt_refcnt);
	list_destroy(&idt->idt_inbufv);
	list_destroy(&idt->idt_outbufv);

	/*
	 * The final call to idm_task_rele may happen with the task
	 * mutex held which may invoke this destructor immediately.
	 * Stall here until the task mutex owner lets go.
	 */
	mutex_enter(&idt->idt_mutex);
	mutex_destroy(&idt->idt_mutex);
}

/*
 * idm_listbuf_insert searches from the back of the list looking for the
 * insertion point.
 */
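/*
 * The list is assumed to be sorted by ascending idb_bufoffset; inserting
 * after the first entry (scanning from the tail) with a smaller offset
 * keeps it that way.
 */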
void
idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
{
	idm_buf_t *idb;

	/* iterate through the list to find the insertion point */
	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {

		if (idb->idb_bufoffset < buf->idb_bufoffset) {

			list_insert_after(lst, idb, buf);
			return;
		}
	}

	/* add the buf to the head of the list */
	list_insert_head(lst, buf);
}

/*ARGSUSED*/
void
idm_wd_thread(void *arg)
{
	idm_conn_t *ic;
	clock_t wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
	clock_t idle_time;

	/* Record the thread id for thread_join() */
	idm.idm_wd_thread_did = curthread->t_did;
	mutex_enter(&idm.idm_global_mutex);
	idm.idm_wd_thread_running = B_TRUE;
	cv_signal(&idm.idm_wd_cv);

	while (idm.idm_wd_thread_running) {
		for (ic = list_head(&idm.idm_tgt_conn_list);
		    ic != NULL;
		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
			idle_time = ddi_get_lbolt() - ic->ic_timestamp;

			/*
			 * If this connection is in FFP then grab a hold
			 * and check the various timeout thresholds.  Otherwise
			 * the connection is closing and we should just
			 * move on to the next one.
			 */
			mutex_enter(&ic->ic_state_mutex);
			if (ic->ic_ffp) {
				idm_conn_hold(ic);
			} else {
				mutex_exit(&ic->ic_state_mutex);
				continue;
			}

			/*
			 * If there hasn't been any activity on this
			 * connection for the keepalive timeout period
			 * and if the client has provided a keepalive
			 * callback then call the keepalive callback.
			 * This allows the client to take action to keep
			 * the link alive (like send a nop PDU).
			 */
			if ((TICK_TO_SEC(idle_time) >=
			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
			    !ic->ic_keepalive) {
				ic->ic_keepalive = B_TRUE;
				if (ic->ic_conn_ops.icb_keepalive) {
					mutex_exit(&ic->ic_state_mutex);
					mutex_exit(&idm.idm_global_mutex);
					(*ic->ic_conn_ops.icb_keepalive)(ic);
					mutex_enter(&idm.idm_global_mutex);
					mutex_enter(&ic->ic_state_mutex);
				}
			} else if ((TICK_TO_SEC(idle_time) <
			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
				/* Reset keepalive */
				ic->ic_keepalive = B_FALSE;
			}

			/*
			 * If there hasn't been any activity on this
			 * connection for the failure timeout period then
			 * drop the connection.  We expect the initiator
			 * to keep the connection alive if it wants the
			 * connection to stay open.
			 *
			 * If it turns out to be desirable to take a
			 * more active role in maintaining the connection
			 * we could add a client callback to send
			 * a "keepalive" kind of message (no doubt a nop)
			 * and fire that on a shorter timer.
			 */
			if (TICK_TO_SEC(idle_time) >
			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
				mutex_exit(&ic->ic_state_mutex);
				mutex_exit(&idm.idm_global_mutex);
				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
				    "conn %p idle for %d seconds, "
				    "sending CE_TRANSPORT_FAIL",
				    (void *)ic, (int)TICK_TO_SEC(idle_time));
				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
				mutex_enter(&idm.idm_global_mutex);
				mutex_enter(&ic->ic_state_mutex);
			}

			idm_conn_rele(ic);

			mutex_exit(&ic->ic_state_mutex);
		}

		(void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
		    wake_time, TR_CLOCK_TICK);
	}
	mutex_exit(&idm.idm_global_mutex);

	thread_exit();
}