/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @file
 * Implement remote device state machine for target and initiator.
 */

/*!
@defgroup device_sm Node State Machine: Remote Device States
*/

#include "ocs.h"
#include "ocs_device.h"
#include "ocs_fabric.h"
#include "ocs_els.h"

static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);

/**
 * @ingroup device_sm
 * @brief Send response to PRLI.
 *
 * <h3 class="desc">Description</h3>
 * For device nodes, this function sends a PRLI response.
 *
 * @param io Pointer to a SCSI IO object.
 * @param ox_id OX_ID of the PRLI.
 *
 * @return Returns None.
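 *
 * @note When the PRLI is accepted, the node transitions directly to
 * __ocs_d_device_ready rather than waiting for the LS_ACC to complete,
 * so that FCP commands arriving right behind the accept are not stalled.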
 */

void
ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
{
	ocs_t *ocs = io->ocs;
	ocs_node_t *node = io->node;

	/* If the back-end doesn't support the fc-type, we send an LS_RJT */
	if (ocs->fc_type != node->fc_type) {
		node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
		ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
		    FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
	}

	/* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
	if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
		node_printf(node, "PRLI rejected by target-server\n");
		ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
		    FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
	} else {
		/* sm: process PRLI payload, send PRLI acc */
		ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);

		/* Immediately go to ready state to avoid window where we're
		 * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
		 */
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
	}
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Initiate node shutdown.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER: {
		int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */

		ocs_scsi_io_alloc_disable(node);

		/* make necessary delete upcall(s) */
		if (node->init && !node->targ) {
			ocs_log_debug(node->ocs,
			    "[%s] delete (initiator) WWPN %s WWNN %s\n",
			    node->display_name, node->wwpn, node->wwnn);
			ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
			if (node->sport->enable_tgt) {
				rc = ocs_scsi_del_initiator(node,
				    OCS_SCSI_INITIATOR_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
				    OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
			}
		} else if (node->targ && !node->init) {
			ocs_log_debug(node->ocs,
			    "[%s] delete (target) WWPN %s WWNN %s\n",
			    node->display_name, node->wwpn, node->wwnn);
			ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
			if (node->sport->enable_ini) {
				rc = ocs_scsi_del_target(node,
				    OCS_SCSI_TARGET_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
				    OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
			}
		} else if (node->init && node->targ) {
			ocs_log_debug(node->ocs,
			    "[%s] delete (initiator+target) WWPN %s WWNN %s\n",
			    node->display_name, node->wwpn, node->wwnn);
			ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
			if (node->sport->enable_tgt) {
				rc = ocs_scsi_del_initiator(node,
				    OCS_SCSI_INITIATOR_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
				    OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
			}
			rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
			if (node->sport->enable_ini) {
				rc = ocs_scsi_del_target(node,
				    OCS_SCSI_TARGET_DELETED);
			}
			if (rc == OCS_SCSI_CALL_COMPLETE) {
				ocs_node_post_event(node,
				    OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
			}
		}
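
		/*
		 * Note: each ocs_scsi_del_*() upcall above returns
		 * OCS_SCSI_CALL_COMPLETE when the back-end finished the delete
		 * inline (or when no upcall was needed), in which case the
		 * matching DEL_*_COMPLETE event has already been posted here;
		 * otherwise the completion event arrives later, once the
		 * back-end finishes the delete asynchronously.
		 */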

		/* we've initiated the upcalls as needed, now kick off the node
		 * detach to precipitate the aborting of outstanding exchanges
		 * associated with said node
		 *
		 * Beware: if we've made upcall(s), we've already transitioned
		 * to a new state by the time we execute this.
		 * TODO: consider doing this before the upcalls...
		 */
		if (node->attached) {
			/* issue hw node free; don't care if succeeds right away
			 * or sometime later, will check node->attached later in
			 * shutdown process
			 */
			rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
			if (node->rnode.free_group) {
				ocs_remote_node_group_free(node->node_group);
				node->node_group = NULL;
				node->rnode.free_group = FALSE;
			}
			if (rc != OCS_HW_RTN_SUCCESS &&
			    rc != OCS_HW_RTN_SUCCESS_SYNC) {
				node_printf(node,
				    "Failed freeing HW node, rc=%d\n", rc);
			}
		}

		/* if neither initiator nor target, proceed to cleanup */
		if (!node->init && !node->targ) {
			/*
			 * node has either been detached or is in the process
			 * of being detached, call common node's initiate
			 * cleanup function.
			 */
			ocs_node_initiate_cleanup(node);
		}
		break;
	}
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* Ignore; this can happen if an ELS is aborted
		 * while in a delay/retry state */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Common device event handler.
 *
 * <h3 class="desc">Description</h3>
 * For device nodes, this event handler manages default and common events.
 *
 * @param funcname Function name text.
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

static void *
__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_t *node = NULL;
	ocs_t *ocs = NULL;
	ocs_assert(ctx, NULL);
	node = ctx->app;
	ocs_assert(node, NULL);
	ocs = node->ocs;
	ocs_assert(ocs, NULL);

	switch(evt) {

	/* Handle shutdown events */
	case OCS_EVT_SHUTDOWN:
		ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
		ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	default:
		/* call default event handler common to all nodes */
		__ocs_node_common(funcname, ctx, evt, arg);
		break;
	}
	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for a domain-attach completion in loop topology.
 *
 * <h3 class="desc">Description</h3>
 * State waits for a domain-attach completion while in loop topology.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK: {
		/* send PLOGI automatically if initiator */
		ocs_node_init_device(node, TRUE);
		break;
	}
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief State: Wait for node resume event.
 *
 * State is entered when a node is in I+T mode and sends a delete initiator/target
 * call to the target-server/initiator-client and needs to wait for that work to complete.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		/* Fall through */

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case OCS_EVT_NODE_DEL_INI_COMPLETE:
	case OCS_EVT_NODE_DEL_TGT_COMPLETE:
		ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief State: Wait for node resume event.
 *
 * State is entered when a node sends a delete initiator/target call to the
 * target-server/initiator-client and needs to wait for that work to complete.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		/* Fall through */

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case OCS_EVT_NODE_DEL_INI_COMPLETE:
	case OCS_EVT_NODE_DEL_TGT_COMPLETE:
		/*
		 * node has either been detached or is in the process of being detached,
		 * call common node's initiate cleanup function
		 */
		ocs_node_initiate_cleanup(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @brief Save the OX_ID for sending LS_ACC sometime later.
 *
 * <h3 class="desc">Description</h3>
 * When deferring the response to an ELS request, the OX_ID of the request
 * is saved using this function.
 *
 * @param io Pointer to a SCSI IO object.
 * @param hdr Pointer to the FC header.
 * @param ls Defines the type of LS_ACC to send: LS_ACC for PLOGI or
 * LS_ACC for PRLI.
 *
 * @return None.
 */

void
ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
{
	ocs_node_t *node = io->node;
	uint16_t ox_id = ocs_be16toh(hdr->ox_id);

	ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);

	node->ls_acc_oxid = ox_id;
	node->send_ls_acc = ls;
	node->ls_acc_io = io;
	node->ls_acc_did = fc_be24toh(hdr->d_id);
}

/**
 * @brief Process the PRLI payload.
 *
 * <h3 class="desc">Description</h3>
 * The PRLI payload is processed; the initiator/target capabilities of the
 * remote node are extracted and saved in the node object.
 *
 * @param node Pointer to the node object.
 * @param prli Pointer to the PRLI payload.
 *
 * @return None.
 */

void
ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
{
	node->init = (ocs_be16toh(prli->service_params) & FC_PRLI_INITIATOR_FUNCTION) != 0;
	node->targ = (ocs_be16toh(prli->service_params) & FC_PRLI_TARGET_FUNCTION) != 0;
	node->fcp2device = (ocs_be16toh(prli->service_params) & FC_PRLI_RETRY) != 0;
	node->fc_type = prli->type;
}

/**
 * @brief Process the ABTS.
 *
 * <h3 class="desc">Description</h3>
 * Common code to process a received ABTS. If an active IO can be found
 * that matches the OX_ID of the ABTS request, a call is made to the
 * backend. Otherwise, a BA_ACC is returned to the initiator.
 *
 * @param io Pointer to a SCSI IO object.
 * @param hdr Pointer to the FC header.
 *
 * @return Returns 0 on success, or a negative error value on failure.
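 *
 * @note If a matching IO is found, a reference is taken on it with
 * ocs_ref_get_unless_zero() and dropped again at the end of this function;
 * the back-end is expected to take its own reference during
 * ocs_scsi_recv_tmf() if it needs the IO beyond that call.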
 */

static int32_t
ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
{
	ocs_node_t *node = io->node;
	ocs_t *ocs = node->ocs;
	uint16_t ox_id = ocs_be16toh(hdr->ox_id);
	uint16_t rx_id = ocs_be16toh(hdr->rx_id);
	ocs_io_t *abortio;

	abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);

	/* If an IO was found, attempt to take a reference on it */
	if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
		/* Got a reference on the IO. Hold it until backend is notified below */
		node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
		    ox_id, rx_id);

		/*
		 * Save the ox_id for the ABTS as the init_task_tag in our manufactured
		 * TMF IO object
		 */
		io->display_name = "abts";
		io->init_task_tag = ox_id;
		/* don't set tgt_task_tag, don't want to confuse with XRI */

		/*
		 * Save the rx_id from the ABTS as it is needed for the BLS response,
		 * regardless of the IO context's rx_id
		 */
		io->abort_rx_id = rx_id;

		/* Call target server command abort */
		io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
		ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);

		/*
		 * Backend will have taken an additional reference on the IO if needed;
		 * done with current reference.
		 */
		ocs_ref_put(&abortio->ref); /* ocs_ref_get(): same function */
	} else {
		/*
		 * Either the IO was not found or it has been freed between finding it
		 * and attempting to get the reference.
		 */
		node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
		    ox_id, (abortio != NULL));

		/* Send a BA_ACC */
		ocs_bls_send_acc_hdr(io, hdr);
	}
	return 0;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for the PLOGI accept to complete.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for the LOGO response.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
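 *
 * @note Any LOGO completion (accept, reject, or failure) results in the node
 * being shut down with an explicit logout.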
 */

void *
__ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* TODO: may want to remove this;
		 * if we'll want to know about PLOGI */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* LOGO response received, send shutdown */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		node_printf(node, "LOGO sent (evt=%s), shutdown node\n", ocs_sm_event_name(evt));
		/* sm: post explicit logout */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;

	/* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for the PRLO response.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}

/**
 * @brief Initialize device node.
 *
 * Initialize device node. If a node is an initiator, then send a PLOGI and transition
 * to __ocs_d_wait_plogi_rsp, otherwise transition to __ocs_d_init.
 *
 * @param node Pointer to the node object.
 * @param send_plogi Boolean indicating whether to send a PLOGI command.
 *
 * @return None.
 */

void
ocs_node_init_device(ocs_node_t *node, int send_plogi)
{
	node->send_plogi = send_plogi;
	if ((node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
		node->nodedb_state = __ocs_d_init;
		ocs_node_transition(node, __ocs_node_paused, NULL);
	} else {
		ocs_node_transition(node, __ocs_d_init, NULL);
	}
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Initial node state for an initiator or a target.
 *
 * <h3 class="desc">Description</h3>
 * This state is entered when a node is instantiated, either having been
 * discovered from a name services query, or having received a PLOGI/FLOGI.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 * - OCS_EVT_ENTER: (uint8_t *) - 1 to send a PLOGI on
 *   entry (initiator-only); 0 indicates a PLOGI is
 *   not sent on entry (initiator-only). Not applicable for a target.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/* check if we need to send PLOGI */
		if (node->send_plogi) {
			/* only send if we have initiator capability, and domain is attached */
			if (node->sport->enable_ini && node->sport->domain->attached) {
				ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
				    OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
				ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
			} else {
				node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
				    node->sport->enable_ini, node->sport->domain->attached);
			}
		}
		break;
	case OCS_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		fc_header_t *hdr = cbdata->header->dma.virt;
		uint32_t d_id = fc_be24toh(hdr->d_id);

		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* domain already attached */
		if (node->sport->domain->attached) {
			rc = ocs_node_attach(node);
			ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
			if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
				ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
			}
			break;
		}

		/* domain not attached; several possibilities: */
		switch (node->sport->topology) {
		case OCS_SPORT_TOPOLOGY_P2P:
			/* we're not attached and sport is p2p, need to attach */
			ocs_domain_attach(node->sport->domain, d_id);
			ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
			break;
		case OCS_SPORT_TOPOLOGY_FABRIC:
			/* we're not attached and sport is fabric, domain attach should have
			 * already been requested as part of the fabric state machine, wait for it
			 */
			ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
			break;
		case OCS_SPORT_TOPOLOGY_UNKNOWN:
			/* Two possibilities:
			 * 1. received a PLOGI before our FLOGI has completed (possible since
			 *    completion comes in on another CQ), thus we don't know what we're
			 *    connected to yet; transition to a state to wait for the fabric
			 *    node to tell us;
			 * 2. PLOGI received before link went down and we haven't performed
			 *    domain attach yet.
			 * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
			 * was received after link back up.
			 */
			node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
			ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
			break;
		default:
			node_printf(node, "received PLOGI, with unexpected topology %d\n",
			    node->sport->topology);
			ocs_assert(FALSE, NULL);
			break;
		}
		break;
	}

	case OCS_EVT_FDISC_RCVD: {
		__ocs_d_common(__func__, ctx, evt, arg);
		break;
	}

	case OCS_EVT_FLOGI_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* this better be coming from an NPort */
		ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);

		/* sm: save sparams, send FLOGI acc */
		ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);

		/* send FC LS_ACC response, override s_id */
		ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
		ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
		if (ocs_p2p_setup(node->sport)) {
			node_printf(node, "p2p setup failed, shutting down node\n");
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		} else {
			ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
		}

		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case OCS_EVT_PRLI_RCVD:
	case OCS_EVT_PRLO_RCVD:
	case OCS_EVT_PDISC_RCVD:
	case OCS_EVT_ADISC_RCVD:
	case OCS_EVT_RSCN_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}
		node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
		    FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
		    NULL, NULL);

		break;
	}

	case OCS_EVT_FCP_CMD_RCVD: {
		/* note: problem, we're now expecting an ELS REQ completion
		 * from both the LOGO and PLOGI */
		if (!node->sport->domain->attached) {
			/* most likely a frame left over from before a link down; drop and
			 * shut node down w/ "explicit logout" so pending frames are processed */
			node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}

		/* Send LOGO */
		node_printf(node, "FCP_CMND received, send LOGO\n");
		if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
			/* failed to send LOGO, go ahead and cleanup node anyways */
			node_printf(node, "Failed to send LOGO\n");
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		} else {
			/* sent LOGO, wait for response */
			ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
		}
		break;
	}
	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait on a response for a sent PLOGI.
 *
 * <h3 class="desc">Description</h3>
 * State is entered when an initiator-capable node has sent
 * a PLOGI and is waiting for a response.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		/* received PLOGI with svc parms, go ahead and attach node
		 * when PLOGI that was sent ultimately completes, it'll be a no-op
		 */

		/* TODO: there is an outstanding PLOGI sent, can we set a flag
		 * to indicate that we don't want to retry it if it times out? */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
		/* sm: domain->attached / ocs_node_attach */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;
	}

	case OCS_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
		if (ocs->fc_type == node->fc_type) {
			ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
			ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
		} else {
			/* TODO: this needs to be looked at. What do we do here? */
		}
		break;

	/* TODO: this needs to be looked at. We could very well be logged in */
	case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
	case OCS_EVT_PRLO_RCVD:
	case OCS_EVT_PDISC_RCVD:
	case OCS_EVT_FDISC_RCVD:
	case OCS_EVT_ADISC_RCVD:
	case OCS_EVT_RSCN_RCVD:
	case OCS_EVT_SCR_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
		    FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
		    NULL, NULL);

		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: save sparams, ocs_node_attach */
		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
		    ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
		/* PLOGI failed, shutdown the node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;

	case OCS_EVT_SRRS_ELS_REQ_RJT:	/* Our PLOGI was rejected, this is ok in some cases */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	case OCS_EVT_FCP_CMD_RCVD: {
		/* not logged in yet and outstanding PLOGI so don't send LOGO,
		 * just drop
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Waiting on a response for a
 * sent PLOGI.
 *
 * <h3 class="desc">Description</h3>
 * State is entered when an initiator-capable node has sent
 * a PLOGI and is waiting for a response. Before receiving the
 * response, a PRLI was received, implying that the PLOGI was
 * successful.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach and then we can send the LS_ACC for the PRLI. Note
		 * that we may receive FCP_CMNDs during this time, since we've
		 * already sent a PRLI and our peer may have accepted it. At
		 * this time, we are not waiting on any other unsolicited
		 * frames to continue with the login process, so it will not
		 * hurt to hold frames here.
		 */
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: save sparams, ocs_node_attach */
		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
		    ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case OCS_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for a domain attach.
 *
 * <h3 class="desc">Description</h3>
 * Waits for a domain-attach complete ok event.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK:
		ocs_assert(node->sport->domain->attached, NULL);
		/* sm: ocs_node_attach */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for topology
 * notification.
 *
 * <h3 class="desc">Description</h3>
 * Waits for topology notification from fabric node, then
 * attaches domain and node.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
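 *
 * @note If the notified topology is p2p, the domain attach is initiated here
 * using the d_id saved from the received PLOGI; in fabric topology the attach
 * is expected to be driven by the fabric node, so this state only waits for
 * it to complete.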
 */

void *
__ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
		ocs_sport_topology_e topology = (ocs_sport_topology_e)(uintptr_t)arg;

		ocs_assert(!node->sport->domain->attached, NULL);
		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
		node_printf(node, "topology notification, topology=%d\n", topology);

		/* At the time the PLOGI was received, the topology was unknown,
		 * so we didn't know which node would perform the domain attach:
		 * 1. The node from which the PLOGI was sent (p2p) or
		 * 2. The node to which the FLOGI was sent (fabric).
		 */
		if (topology == OCS_SPORT_TOPOLOGY_P2P) {
			/* if this is p2p, need to attach to the domain using the
			 * d_id from the PLOGI received
			 */
			ocs_domain_attach(node->sport->domain, node->ls_acc_did);
		}
		/* else, if this is fabric, the domain attach should be performed
		 * by the fabric node (node sending FLOGI); just wait for attach
		 * to complete
		 */

		ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
		break;
	}
	case OCS_EVT_DOMAIN_ATTACH_OK:
		ocs_assert(node->sport->domain->attached, NULL);
		node_printf(node, "domain attach ok\n");
		/* sm: ocs_node_attach */
		rc = ocs_node_attach(node);
		ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
		}
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}
	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for a node attach when found by a remote node.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
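 *
 * @note On OCS_EVT_NODE_ATTACH_OK, any LS_ACC that was deferred via
 * ocs_send_ls_acc_after_attach() (PLOGI or PRLI) is sent from this state
 * using the saved OX_ID.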
 */

void *
__ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_NODE_ATTACH_OK:
		node->attached = TRUE;
		switch (node->send_ls_acc) {
		case OCS_NODE_SEND_LS_ACC_PLOGI: {
			/* sm: send_plogi_acc is set / send PLOGI acc */
			/* Normal case for T, or I+T */
			ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
			ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case OCS_NODE_SEND_LS_ACC_PRLI: {
			ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case OCS_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
			break;
		}
		break;

	case OCS_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = FALSE;
		node_printf(node, "node attach failed\n");
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	/* Handle shutdown events */
	case OCS_EVT_SHUTDOWN:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
		ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for a node/domain
 * attach then shutdown node.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
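 *
 * @note Whether the attach completes with OCS_EVT_NODE_ATTACH_OK or
 * OCS_EVT_NODE_ATTACH_FAIL, this state proceeds to
 * __ocs_d_initiate_shutdown; only the node->attached flag differs.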
 */

void *
__ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	/* wait for any of these attach events and then shutdown */
	case OCS_EVT_NODE_ATTACH_OK:
		node->attached = TRUE;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	case OCS_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = FALSE;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Port is logged in.
 *
 * <h3 class="desc">Description</h3>
 * This state is entered when a remote port has completed port login (PLOGI).
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	/* TODO: I+T: what if PLOGI response not yet received ? */

	switch(evt) {
	case OCS_EVT_ENTER:
		/* Normal case for I or I+T */
		if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
		    && !node->sent_prli) {
			/* sm: if enable_ini / send PRLI */
			ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
			node->sent_prli = TRUE;
			/* can now expect ELS_REQ_OK/FAIL/RJT */
		}
		break;

	case OCS_EVT_FCP_CMD_RCVD: {
		/* For target functionality send PRLO and drop the CMD frame. */
		if (node->sport->enable_tgt) {
			if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
			    OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
				ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
			}
		}
		break;
	}

	case OCS_EVT_PRLI_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* Normal for T or I+T */

		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
		ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_OK: {	/* PRLI response */
		/* Normal case for I or I+T */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: process PRLI payload */
		ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_FAIL: {	/* PRLI response failed */
		/* I, I+T, assume some link failure, shutdown node */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		break;
	}

	case OCS_EVT_SRRS_ELS_REQ_RJT: {	/* PRLI rejected by remote */
		/* Normal for I, I+T (connected to an I) */
		/* Node doesn't want to be a target, stay here and wait for a PRLI from the remote node
		 * if it really wants to connect to us as target */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;
	}

	case OCS_EVT_SRRS_ELS_CMPL_OK: {
		/* Normal T, I+T, target-server rejected the process login */
		/* This would be received only in the case where we sent LS_RJT for the PRLI, so
		 * do nothing. (note: as T only we could shutdown the node)
		 */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		break;
	}

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for a LOGO accept.
 *
 * <h3 class="desc">Description</h3>
 * Waits for a LOGO accept completion.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
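 *
 * @note Both successful and failed LOGO accept completions post
 * OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, so the node is torn down either way.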
 */

void *
__ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK:
	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		/* sm: / post explicit logout */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Device is ready.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	if (evt != OCS_EVT_FCP_CMD_RCVD) {
		node_sm_trace();
	}

	switch(evt) {
	case OCS_EVT_ENTER:
		node->fcp_enabled = TRUE;
		if (node->init) {
			device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
			    node->wwpn, node->wwnn);
			if (node->sport->enable_tgt)
				ocs_scsi_new_initiator(node);
		}
		if (node->targ) {
			device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
			    node->wwpn, node->wwnn);
			if (node->sport->enable_ini)
				ocs_scsi_new_target(node);
		}
		break;

	case OCS_EVT_EXIT:
		node->fcp_enabled = FALSE;
		break;

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_PDISC_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_PRLI_RCVD: {
		/* T, I+T: remote initiator is slow to get started */
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_process_prli_payload(node, cbdata->payload->dma.virt);

		/* sm: send PRLI acc/reject */
		if (ocs->fc_type == node->fc_type)
			ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
		else
			ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
			    FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		break;
	}

	case OCS_EVT_PRLO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;
		fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;

		/* sm: send PRLO acc/reject */
		if (ocs->fc_type == prlo->type)
			ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
		else
			ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
			    FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
		/* TODO: need implicit logout */
		break;
	}

	case OCS_EVT_LOGO_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case OCS_EVT_ADISC_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_RRQ_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* Send LS_ACC */
		ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		break;
	}

	case OCS_EVT_ABTS_RCVD:
		ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
		break;

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		break;

	case OCS_EVT_NODE_REFOUND:
		break;

	case OCS_EVT_NODE_MISSING:
		if (node->sport->enable_rscn) {
			ocs_node_transition(node, __ocs_d_device_gone, NULL);
		}
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK:
		/* T, or I+T, PRLI accept completed ok */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		break;

	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		/* T, or I+T, PRLI accept failed to complete */
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		node_printf(node, "Failed to send PRLI LS_ACC\n");
		break;

	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Node is gone (absent from GID_PT).
 *
 * <h3 class="desc">Description</h3>
 * State entered when a node is detected as being gone (absent from GID_PT).
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
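 *
 * @note If the node reappears (OCS_EVT_NODE_REFOUND), it is re-authenticated
 * with ADISC rather than a fresh PLOGI/PRLI exchange.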
 */

void *
__ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	int32_t rc = OCS_SCSI_CALL_COMPLETE;
	int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER: {
		const char *labels[] = {"none", "initiator", "target", "initiator+target"};

		device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
		    labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);

		switch(ocs_node_get_enable(node)) {
		case OCS_NODE_ENABLE_T_TO_T:
		case OCS_NODE_ENABLE_I_TO_T:
		case OCS_NODE_ENABLE_IT_TO_T:
			rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		case OCS_NODE_ENABLE_T_TO_I:
		case OCS_NODE_ENABLE_I_TO_I:
		case OCS_NODE_ENABLE_IT_TO_I:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			break;

		case OCS_NODE_ENABLE_T_TO_IT:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			break;

		case OCS_NODE_ENABLE_I_TO_IT:
			rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		case OCS_NODE_ENABLE_IT_TO_IT:
			rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
			rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
			break;

		default:
			rc = OCS_SCSI_CALL_COMPLETE;
			break;
		}

		if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
			ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
		}

		break;
	}
	case OCS_EVT_NODE_REFOUND:
		/* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */

		/* reauthenticate with PLOGI/PRLI */
		/* ocs_node_transition(node, __ocs_d_discovered, NULL); */

		/* reauthenticate with ADISC
		 * sm: send ADISC */
		ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
		break;

	case OCS_EVT_PLOGI_RCVD: {
		/* sm: save sparams, set send_plogi_acc, post implicit logout
		 * Save plogi parameters */
		ocs_node_save_sparms(node, cbdata->payload->dma.virt);
		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters, and send ACC */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case OCS_EVT_FCP_CMD_RCVD: {
		/* most likely a stale frame (received prior to link down), if attempt
		 * to send LOGO, will probably timeout and eat up 20s; thus, drop FCP_CMND
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}
	case OCS_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		/* sm: send LOGO acc */
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup device_sm
 * @brief Device node state machine: Wait for the ADISC response.
 *
 * <h3 class="desc">Description</h3>
 * Waits for the ADISC response from the remote node.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_cb_t *cbdata = arg;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_SRRS_ELS_REQ_OK:
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		ocs_node_transition(node, __ocs_d_device_ready, NULL);
		break;

	case OCS_EVT_SRRS_ELS_REQ_RJT:
		/* received an LS_RJT, in this case, send shutdown (explicit logo)
		 * event which will unregister the node, and start over with PLOGI
		 */
		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
			return NULL;
		}
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: post explicit logout */
		ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;

	case OCS_EVT_LOGO_RCVD: {
		/* In this case, we have the equivalent of an LS_RJT for the ADISC,
		 * so we need to abort the ADISC, and re-login with PLOGI
		 */
		/* sm: request abort, send LOGO acc */
		fc_header_t *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
		ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
		ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__ocs_d_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}