// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * device_sm Node State Machine: Remote Device States
 */

#include "efc.h"
#include "efc_device.h"
#include "efc_fabric.h"

void
efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
{
	int rc = EFC_SCSI_CALL_COMPLETE;
	struct efc *efc = node->efc;

	node->ls_acc_oxid = ox_id;
	node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;

	/*
	 * Wait for backend session registration
	 * to complete before sending PRLI resp
	 */

	if (node->init) {
		efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n",
			     node->display_name, node->wwpn, node->wwnn);
		if (node->nport->enable_tgt)
			rc = efc->tt.scsi_new_node(efc, node);
	}

	if (rc < 0)
		efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);

	if (rc == EFC_SCSI_CALL_COMPLETE)
		efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
}

static void
__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
	       enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = NULL;
	struct efc *efc = NULL;

	node = ctx->app;
	efc = node->efc;

	switch (evt) {
	/* Handle shutdown events */
	case EFC_EVT_SHUTDOWN:
		efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
			      funcname, efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;
	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
		efc_log_debug(efc, "[%s] %-20s %-20s\n",
			      node->display_name, funcname,
			      efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
			      funcname, efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	default:
		/* call default event handler common to all nodes */
		__efc_node_common(funcname, ctx, evt, arg);
	}
}

static void
__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	/*
	 * State is entered when a node sends a delete initiator/target call
	 * to the target-server/initiator-client and needs to wait for that
	 * work to complete.
	 */
	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		fallthrough;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
*/ 104 break; 105 106 case EFC_EVT_NODE_DEL_INI_COMPLETE: 107 case EFC_EVT_NODE_DEL_TGT_COMPLETE: 108 /* 109 * node has either been detached or is in the process 110 * of being detached, 111 * call common node's initiate cleanup function 112 */ 113 efc_node_initiate_cleanup(node); 114 break; 115 116 case EFC_EVT_EXIT: 117 efc_node_accept_frames(node); 118 break; 119 120 case EFC_EVT_SRRS_ELS_REQ_FAIL: 121 /* Can happen as ELS IO IO's complete */ 122 WARN_ON(!node->els_req_cnt); 123 node->els_req_cnt--; 124 break; 125 126 /* ignore shutdown events as we're already in shutdown path */ 127 case EFC_EVT_SHUTDOWN: 128 /* have default shutdown event take precedence */ 129 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; 130 fallthrough; 131 132 case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: 133 case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: 134 node_printf(node, "%s received\n", efc_sm_event_name(evt)); 135 break; 136 case EFC_EVT_DOMAIN_ATTACH_OK: 137 /* don't care about domain_attach_ok */ 138 break; 139 default: 140 __efc_d_common(__func__, ctx, evt, arg); 141 } 142 } 143 144 static void 145 __efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx, 146 enum efc_sm_event evt, void *arg) 147 { 148 struct efc_node *node = ctx->app; 149 150 efc_node_evt_set(ctx, evt, __func__); 151 152 node_sm_trace(); 153 154 switch (evt) { 155 case EFC_EVT_ENTER: 156 efc_node_hold_frames(node); 157 fallthrough; 158 159 case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: 160 case EFC_EVT_ALL_CHILD_NODES_FREE: 161 /* These are expected events. */ 162 break; 163 164 case EFC_EVT_NODE_DEL_INI_COMPLETE: 165 case EFC_EVT_NODE_DEL_TGT_COMPLETE: 166 efc_node_transition(node, __efc_d_wait_del_node, NULL); 167 break; 168 169 case EFC_EVT_EXIT: 170 efc_node_accept_frames(node); 171 break; 172 173 case EFC_EVT_SRRS_ELS_REQ_FAIL: 174 /* Can happen as ELS IO IO's complete */ 175 WARN_ON(!node->els_req_cnt); 176 node->els_req_cnt--; 177 break; 178 179 /* ignore shutdown events as we're already in shutdown path */ 180 case EFC_EVT_SHUTDOWN: 181 /* have default shutdown event take precedence */ 182 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; 183 fallthrough; 184 185 case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: 186 case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: 187 node_printf(node, "%s received\n", efc_sm_event_name(evt)); 188 break; 189 case EFC_EVT_DOMAIN_ATTACH_OK: 190 /* don't care about domain_attach_ok */ 191 break; 192 default: 193 __efc_d_common(__func__, ctx, evt, arg); 194 } 195 } 196 197 void 198 __efc_d_initiate_shutdown(struct efc_sm_ctx *ctx, 199 enum efc_sm_event evt, void *arg) 200 { 201 struct efc_node *node = ctx->app; 202 struct efc *efc = node->efc; 203 204 efc_node_evt_set(ctx, evt, __func__); 205 206 node_sm_trace(); 207 208 switch (evt) { 209 case EFC_EVT_ENTER: { 210 int rc = EFC_SCSI_CALL_COMPLETE; 211 212 /* assume no wait needed */ 213 node->els_io_enabled = false; 214 215 /* make necessary delete upcall(s) */ 216 if (node->init && !node->targ) { 217 efc_log_info(node->efc, 218 "[%s] delete (initiator) WWPN %s WWNN %s\n", 219 node->display_name, 220 node->wwpn, node->wwnn); 221 efc_node_transition(node, 222 __efc_d_wait_del_node, 223 NULL); 224 if (node->nport->enable_tgt) 225 rc = efc->tt.scsi_del_node(efc, node, 226 EFC_SCSI_INITIATOR_DELETED); 227 228 if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0) 229 efc_node_post_event(node, 230 EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); 231 232 } else if (node->targ && !node->init) { 233 efc_log_info(node->efc, 234 "[%s] delete (target) WWPN %s WWNN %s\n", 235 node->display_name, 236 node->wwpn, node->wwnn); 237 
			efc_node_transition(node,
					    __efc_d_wait_del_node,
					    NULL);
			if (node->nport->enable_ini)
				rc = efc->tt.scsi_del_node(efc, node,
					EFC_SCSI_TARGET_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);

		} else if (node->init && node->targ) {
			efc_log_info(node->efc,
				     "[%s] delete (I+T) WWPN %s WWNN %s\n",
				     node->display_name, node->wwpn, node->wwnn);
			efc_node_transition(node, __efc_d_wait_del_ini_tgt,
					    NULL);
			if (node->nport->enable_tgt)
				rc = efc->tt.scsi_del_node(efc, node,
					EFC_SCSI_INITIATOR_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
			/* assume no wait needed */
			rc = EFC_SCSI_CALL_COMPLETE;
			if (node->nport->enable_ini)
				rc = efc->tt.scsi_del_node(efc, node,
					EFC_SCSI_TARGET_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
		}

		/* we've initiated the upcalls as needed, now kick off the node
		 * detach to precipitate the aborting of outstanding exchanges
		 * associated with said node
		 *
		 * Beware: if we've made upcall(s), we've already transitioned
		 * to a new state by the time we execute this.
		 * consider doing this before the upcalls?
		 */
		if (node->attached) {
			/* issue hw node free; don't care if succeeds right
			 * away or sometime later, will check node->attached
			 * later in shutdown process
			 */
			rc = efc_cmd_node_detach(efc, &node->rnode);
			if (rc < 0)
				node_printf(node,
					    "Failed freeing HW node, rc=%d\n",
					    rc);
		}

		/* if neither initiator nor target, proceed to cleanup */
		if (!node->init && !node->targ) {
			/*
			 * node has either been detached or is in
			 * the process of being detached,
			 * call common node's initiate cleanup function
			 */
			efc_node_initiate_cleanup(node);
		}
		break;
	}
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* Ignore, this can happen if an ELS is
		 * aborted while in a delay/retry state
		 */
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_loop(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK: {
		/* send PLOGI automatically if initiator */
		efc_node_init_device(node, true);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
efc_send_ls_acc_after_attach(struct efc_node *node,
			     struct fc_frame_header *hdr,
			     enum efc_node_send_ls_acc ls)
{
	u16 ox_id = be16_to_cpu(hdr->fh_ox_id);

	/* Save the OX_ID for sending LS_ACC sometime later */
	WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);

	node->ls_acc_oxid = ox_id;
	node->send_ls_acc = ls;
	node->ls_acc_did = ntoh24(hdr->fh_d_id);
}

void
efc_process_prli_payload(struct efc_node *node, void *prli)
{
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp sp;
	} *pp;

	pp = prli;
	node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
	node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
}

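/*
 * Wait for the PLOGI LS_ACC send to complete: a good completion moves the
 * node to the port-logged-in state, a failed completion initiates node
 * shutdown.
 */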
void
__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:	/* PLOGI ACC completions */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_transition(node, __efc_d_port_logged_in, NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL:
		/* LOGO response received, send shutdown */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node_printf(node,
			    "LOGO sent (evt=%s), shutdown node\n",
			    efc_sm_event_name(evt));
		/* sm: / post explicit logout */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
				    NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
efc_node_init_device(struct efc_node *node, bool send_plogi)
{
	node->send_plogi = send_plogi;
	if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
	    (node->rnode.fc_id != FC_FID_DOM_MGR)) {
		node->nodedb_state = __efc_d_init;
		efc_node_transition(node, __efc_node_paused, NULL);
	} else {
		efc_node_transition(node, __efc_d_init, NULL);
	}
}

static void
efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
{
	switch (node->nport->topology) {
	case EFC_NPORT_TOPO_P2P:
		/* we're not attached and nport is p2p,
		 * need to attach
		 */
		efc_domain_attach(node->nport->domain, d_id);
		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	case EFC_NPORT_TOPO_FABRIC:
		/* we're not attached and nport is fabric, domain
		 * attach should have already been requested as part
		 * of the fabric state machine, wait for it
		 */
		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	case EFC_NPORT_TOPO_UNKNOWN:
		/* Two possibilities:
		 * 1. received a PLOGI before our FLOGI has completed
		 *    (possible since completion comes in on another
		 *    CQ), thus we don't know what we're connected to
		 *    yet; transition to a state to wait for the
		 *    fabric node to tell us;
		 * 2. PLOGI received before link went down and we
		 *    haven't performed domain attach yet.
		 * Note: we cannot distinguish between 1. and 2.
		 * so have to assume PLOGI
		 * was received after link back up.
		 */
		node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
			    d_id);
		efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
		break;
	default:
		node_printf(node, "received PLOGI, unexpected topology %d\n",
			    node->nport->topology);
	}
}

void
__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * This state is entered when a node is instantiated,
	 * either having been discovered from a name services query,
	 * or having received a PLOGI/FLOGI.
	 */
	switch (evt) {
	case EFC_EVT_ENTER:
		if (!node->send_plogi)
			break;
		/* only send if we have initiator capability,
		 * and domain is attached
		 */
		if (node->nport->enable_ini &&
		    node->nport->domain->attached) {
			efc_send_plogi(node);

			efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
		} else {
			node_printf(node, "not sending plogi nport.ini=%d,",
				    node->nport->enable_ini);
			node_printf(node, "domain attached=%d\n",
				    node->nport->domain->attached);
		}
		break;
	case EFC_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		int rc;

		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/* domain not attached; several possibilities: */
		if (!node->nport->domain->attached) {
			efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
			break;
		}

		/* domain already attached */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;
	}

	case EFC_EVT_FDISC_RCVD: {
		__efc_d_common(__func__, ctx, evt, arg);
		break;
	}

	case EFC_EVT_FLOGI_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		u32 d_id = ntoh24(hdr->fh_d_id);

		/* sm: / save sparams, send FLOGI acc */
		memcpy(node->nport->domain->flogi_service_params,
		       cbdata->payload->dma.virt,
		       sizeof(struct fc_els_flogi));

		/* send FC LS_ACC response, override s_id */
		efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);

		efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);

		if (efc_p2p_setup(node->nport)) {
			node_printf(node, "p2p failed, shutting down node\n");
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			break;
		}

		efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a link
			 * down; drop and
			 * shut node down w/ "explicit logout" so pending
			 * frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));
			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}

		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_PRLI_RCVD:
	case EFC_EVT_PRLO_RCVD:
	case EFC_EVT_PDISC_RCVD:
	case EFC_EVT_ADISC_RCVD:
	case EFC_EVT_RSCN_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
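
		/*
		 * These requests all require a prior login: if the domain is
		 * not attached the frame is stale and the node is shut down,
		 * otherwise reject with "PLOGI required".
		 */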
		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a link
			 * down; drop and shut node down w/ "explicit logout"
			 * so pending frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));

			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
			break;
		}
		node_printf(node, "%s received, sending reject\n",
			    efc_sm_event_name(evt));

		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);

		break;
	}

	case EFC_EVT_FCP_CMD_RCVD: {
		/* note: problem, we're now expecting an ELS REQ completion
		 * from both the LOGO and PLOGI
		 */
		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a
			 * link down; drop and
			 * shut node down w/ "explicit logout" so pending
			 * frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));
			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
			break;
		}

		/* Send LOGO */
		node_printf(node, "FCP_CMND received, send LOGO\n");
		if (efc_send_logo(node)) {
			/*
			 * failed to send LOGO, go ahead and cleanup node
			 * anyways
			 */
			node_printf(node, "Failed to send LOGO\n");
			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
		} else {
			/* sent LOGO, wait for response */
			efc_node_transition(node,
					    __efc_d_wait_logo_rsp, NULL);
		}
		break;
	}
	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		/* received PLOGI with svc parms, go ahead and attach node
		 * when PLOGI that was sent ultimately completes, it'll be a
		 * no-op
		 *
		 * If there is an outstanding PLOGI sent, can we set a flag
		 * to indicate that we don't want to retry it if it times out?
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);
		/* sm: domain->attached / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;
	}

	case EFC_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
				    NULL);
		break;

	case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
	case EFC_EVT_PRLO_RCVD:
	case EFC_EVT_PDISC_RCVD:
	case EFC_EVT_FDISC_RCVD:
	case EFC_EVT_ADISC_RCVD:
	case EFC_EVT_RSCN_RCVD:
	case EFC_EVT_SCR_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received, sending reject\n",
			    efc_sm_event_name(evt));

		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);

		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;

	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* Our PLOGI was rejected, this is ok in some cases */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;

	case EFC_EVT_FCP_CMD_RCVD: {
		/* not logged in yet and outstanding PLOGI so don't send LOGO,
		 * just drop
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				  enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach and then we can send the LS_ACC for the PRLI. During
		 * this time we may receive FCP_CMNDs (possible since we've
		 * already sent a PRLI and our peer may have accepted it). We
		 * are not waiting on any other unsolicited frames to continue
		 * with the login process, so it will not hurt to hold frames
		 * here.
		 */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);

		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
		WARN_ON(!node->nport->domain->attached);
		/* sm: / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);

		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
		enum efc_nport_topology topology =
			(enum efc_nport_topology)arg;

		WARN_ON(node->nport->domain->attached);

		WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);

		node_printf(node, "topology notification, topology=%d\n",
			    topology);

		/* At the time the PLOGI was received, the topology was unknown,
		 * so we didn't know which node would perform the domain attach:
		 * 1. The node from which the PLOGI was sent (p2p) or
		 * 2. The node to which the FLOGI was sent (fabric).
		 */
		if (topology == EFC_NPORT_TOPO_P2P) {
			/* if this is p2p, need to attach to the domain using
			 * the d_id from the PLOGI received
			 */
			efc_domain_attach(node->nport->domain,
					  node->ls_acc_did);
		}
		/* else, if this is fabric, the domain attach
		 * should be performed by the fabric node (node sending FLOGI);
		 * just wait for attach to complete
		 */

		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	}
	case EFC_EVT_DOMAIN_ATTACH_OK:
		WARN_ON(!node->nport->domain->attached);
		node_printf(node, "domain attach ok\n");
		/* sm: / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		switch (node->send_ls_acc) {
		case EFC_NODE_SEND_LS_ACC_PLOGI: {
			/* sm: send_plogi_acc is set / send PLOGI acc */
			/* Normal case for T, or I+T */
			efc_send_plogi_acc(node, node->ls_acc_oxid);
			efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
					    NULL);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_PRLI: {
			efc_d_send_prli_rsp(node, node->ls_acc_oxid);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			efc_node_transition(node,
					    __efc_d_port_logged_in, NULL);
			break;
		}
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	/* Handle shutdown events */
	case EFC_EVT_SHUTDOWN:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
				    NULL);
		break;
	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
		efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
				    NULL);
		break;
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
		efc_node_transition(node,
				    __efc_d_wait_attach_evt_shutdown, NULL);
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
				 enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	/* wait for any of these attach events and then shutdown */
	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* Normal case for I or I+T */
		if (node->nport->enable_ini &&
		    !(node->rnode.fc_id != FC_FID_DOM_MGR)) {
			/* sm: if enable_ini / send PRLI */
			efc_send_prli(node);
			/* can now expect ELS_REQ_OK/FAIL/RJT */
		}
		break;

	case EFC_EVT_FCP_CMD_RCVD: {
		break;
	}

	case EFC_EVT_PRLI_RCVD: {
		/* Normal case for T or I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		struct {
			struct fc_els_prli prli;
			struct fc_els_spp sp;
		} *pp;

		pp = cbdata->payload->dma.virt;
		if (pp->sp.spp_type != FC_TYPE_FCP) {
			/* Only FCP is supported */
			efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
					ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
			break;
		}

		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_NODE_SESS_REG_OK:
		if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
			efc_send_prli_acc(node, node->ls_acc_oxid);

		node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;

	case EFC_EVT_NODE_SESS_REG_FAIL:
		efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
				ELS_EXPL_UNSUPR, 0);
		node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK: {	/* PRLI response */
		/* Normal case for I or I+T */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / process PRLI payload */
		efc_process_prli_payload(node, cbdata->els_rsp.virt);
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_FAIL: {	/* PRLI response failed */
		/* I, I+T, assume some link failure, shutdown node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_RJT: {
		/* PRLI rejected by remote
		 * Normal for I, I+T (connected to an I)
		 * Node doesn't want to be a target, stay here and wait for a
		 * PRLI from the remote node
		 * if it really wants to connect to us as target
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;
	}

	case EFC_EVT_SRRS_ELS_CMPL_OK: {
		/* Normal T, I+T, target-server rejected the process login */
		/* This would be received only in the case where we sent
		 * LS_RJT for the PRLI, so
		 * do nothing. (note: as T only we could shutdown the node)
		 */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		break;
	}

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc,
		 * post implicit logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters,
		 * and send ACC
		 */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
				    NULL);
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt),
			    node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/* sm: / post explicit logout */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_device_ready(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	if (evt != EFC_EVT_FCP_CMD_RCVD)
		node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		node->fcp_enabled = true;
		if (node->targ) {
			efc_log_info(efc,
				     "[%s] found (target) WWPN %s WWNN %s\n",
				     node->display_name,
				     node->wwpn, node->wwnn);
			if (node->nport->enable_ini)
				efc->tt.scsi_new_node(efc, node);
		}
		break;

	case EFC_EVT_EXIT:
		node->fcp_enabled = false;
		break;

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc,
		 * post implicit logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/*
		 * Restart node attach with new service parameters,
		 * and send ACC
		 */
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case EFC_EVT_PRLI_RCVD: {
		/* T, I+T: remote initiator is slow to get started */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		struct {
			struct fc_els_prli prli;
			struct fc_els_spp sp;
		} *pp;

		pp = cbdata->payload->dma.virt;
		if (pp->sp.spp_type != FC_TYPE_FCP) {
			/* Only FCP is supported */
			efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
					ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
			break;
		}

		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_PRLO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* sm: / send PRLO acc */
		efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		/* need implicit logout? */
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_ADISC_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* sm: / send ADISC acc */
		efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_ABTS_RCVD:
		/* sm: / process ABTS */
		efc_log_err(efc, "Unexpected event:%s\n",
			    efc_sm_event_name(evt));
		break;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		break;

	case EFC_EVT_NODE_REFOUND:
		break;

	case EFC_EVT_NODE_MISSING:
		if (node->nport->enable_rscn)
			efc_node_transition(node, __efc_d_device_gone, NULL);

		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
		/* T, or I+T, PRLI accept completed ok */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/* T, or I+T, PRLI accept failed to complete */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node_printf(node, "Failed to send PRLI LS_ACC\n");
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_device_gone(struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		int rc = EFC_SCSI_CALL_COMPLETE;
		int rc_2 = EFC_SCSI_CALL_COMPLETE;
		static const char * const labels[] = {
			"none", "initiator", "target", "initiator+target"
		};

		efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n",
			     node->display_name,
			     labels[(node->targ << 1) | (node->init)],
			     node->wwpn, node->wwnn);

		switch (efc_node_get_enable(node)) {
		case EFC_NODE_ENABLE_T_TO_T:
		case EFC_NODE_ENABLE_I_TO_T:
		case EFC_NODE_ENABLE_IT_TO_T:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_TARGET_MISSING);
			break;

		case EFC_NODE_ENABLE_T_TO_I:
		case EFC_NODE_ENABLE_I_TO_I:
		case EFC_NODE_ENABLE_IT_TO_I:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_INITIATOR_MISSING);
			break;

		case EFC_NODE_ENABLE_T_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_INITIATOR_MISSING);
			break;

		case EFC_NODE_ENABLE_I_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_TARGET_MISSING);
			break;

		case EFC_NODE_ENABLE_IT_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_INITIATOR_MISSING);
			rc_2 = efc->tt.scsi_del_node(efc, node,
						     EFC_SCSI_TARGET_MISSING);
			break;

		default:
			rc = EFC_SCSI_CALL_COMPLETE;
			break;
		}

		if (rc == EFC_SCSI_CALL_COMPLETE &&
		    rc_2 == EFC_SCSI_CALL_COMPLETE)
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);

		break;
	}
	case EFC_EVT_NODE_REFOUND:
		/* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */

		/* reauthenticate with PLOGI/PRLI */
		/* efc_node_transition(node, __efc_d_discovered, NULL); */

		/* reauthenticate with ADISC */
		/* sm: / send ADISC */
		efc_send_adisc(node);
		efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
		break;

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc, post implicit
		 * logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/*
		 * Restart node attach with new service parameters, and send
		 * ACC
		 */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
				    NULL);
		break;
	}

	case EFC_EVT_FCP_CMD_RCVD: {
		/* most likely a stale frame (received prior to link down),
		 * if attempt to send LOGO, will probably timeout and eat
		 * up 20s; thus, drop FCP_CMND
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}
	case EFC_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;

	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* received an LS_RJT, in this case, send shutdown
		 * (explicit logo) event which will unregister the node,
		 * and start over with PLOGI
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / post explicit logout */
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
				    NULL);
		break;

	case EFC_EVT_LOGO_RCVD: {
		/* In this case, we have the equivalent of an LS_RJT for
		 * the ADISC, so we need to abort the ADISC, and re-login
		 * with PLOGI
		 */
		/* sm: / request abort, send LOGO acc */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);

		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}