/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"


/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
		 struct lpfc_name * nn, struct lpfc_name * pn)
{
	/* Compare the ADISC rsp WWNN / WWPN with our internal node
	 * table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
		return 0;

	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
		return 0;

	/* we match, return success */
	return 1;
}

int
lpfc_check_sparm(struct lpfc_hba * phba,
		 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
		 uint32_t class)
{
	volatile struct serv_parm *hsp = &phba->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
			hsp->cls1.rcvDataSizeLsb;
		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
			sp->cls1.rcvDataSizeLsb;
		if (ssp_value > hsp_value) {
			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
		}
	} else if (class == CLASS1) {
		return 0;
	}

	if (sp->cls2.classValid) {
		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
			hsp->cls2.rcvDataSizeLsb;
		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
			sp->cls2.rcvDataSizeLsb;
		if (ssp_value > hsp_value) {
			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
		}
	} else if (class == CLASS2) {
		return 0;
	}

	if (sp->cls3.classValid) {
		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
			hsp->cls3.rcvDataSizeLsb;
		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
			sp->cls3.rcvDataSizeLsb;
		if (ssp_value > hsp_value) {
			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
		}
	} else if (class == CLASS3) {
		return 0;
	}

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
			(hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
}

static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
			struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void *ptr = NULL;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* For lpfc_els_abort, context2 could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulpStatus error since we are returning NULL ptr */
		if (!(irsp->ulpStatus)) {
			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
		}
		ptr = NULL;
	}
	return ptr;
}


/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry.  This
 * routine effectively results in a "software abort".
 */
int
lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
	       int send_abts)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	int found = 0;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0205 Abort outstanding I/O on NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* First check the txq */
	do {
		found = 0;
		spin_lock_irq(phba->host->host_lock);
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			/* Check to see if iocb matches the nport we are
			   looking for */
			if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
				found = 1;
				/* It matches, so dequeue it and call compl
				   with an error */
				list_del(&iocb->list);
				pring->txq_cnt--;
				if (iocb->iocb_cmpl) {
					icmd = &iocb->iocb;
					icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
					icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
					spin_unlock_irq(phba->host->host_lock);
					(iocb->iocb_cmpl) (phba, iocb, iocb);
					spin_lock_irq(phba->host->host_lock);
				} else
					lpfc_sli_release_iocbq(phba, iocb);
				break;
			}
		}
		spin_unlock_irq(phba->host->host_lock);
	} while (found);

	/* Everything on txcmplq will be returned by firmware
	 * with a no rpi / linkdown / abort error.  For ring 0,
	 * ELS discovery, we want to get rid of it right here.
	 */
	/* Next check the txcmplq */
	do {
		found = 0;
		spin_lock_irq(phba->host->host_lock);
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
					 list) {
			/* Check to see if iocb matches the nport we are
			   looking for */
			if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
				found = 1;
				/* It matches, so dequeue it and call compl
				   with an error */
				list_del(&iocb->list);
				pring->txcmplq_cnt--;

				icmd = &iocb->iocb;
				/* If the driver is completing an ELS
				 * command early, flush it out of the firmware.
				 */
				if (send_abts &&
				    (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
				    (icmd->un.elsreq64.bdl.ulpIoTag32)) {
					lpfc_sli_issue_abort_iotag32(phba,
								     pring, iocb);
				}
				if (iocb->iocb_cmpl) {
					icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
					icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
					spin_unlock_irq(phba->host->host_lock);
					(iocb->iocb_cmpl) (phba, iocb, iocb);
					spin_lock_irq(phba->host->host_lock);
				} else
					lpfc_sli_release_iocbq(phba, iocb);
				break;
			}
		}
		spin_unlock_irq(phba->host->host_lock);
	} while (found);

	/* If we are delaying issuing an ELS command, cancel it */
	if (ndlp->nlp_flag & NLP_DELAY_TMO)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	return 0;
}

static int
lpfc_rcv_plogi(struct lpfc_hba * phba,
	       struct lpfc_nodelist * ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	int rc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	if (phba->hba_state <= LPFC_FLOGI) {
		/* Before responding to PLOGI, check for pt2pt mode.
		 * If we are pt2pt, with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
		if (phba->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
				/* If the other side is supposed to initiate
				 * the PLOGI anyway, just ACC it now and
				 * move on with discovery.
				 */
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(phba);
			} else {
				lpfc_initial_flogi(phba);
			}
		} else {
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
					    ndlp);
			return 0;
		}
	}
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
		/* Reject this request because of invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_ELS,
			"%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			ndlp->nlp_rpi);

	if ((phba->cfg_fcp_class == 2) &&
	    (sp->cls2.classValid)) {
		ndlp->nlp_fcp_info |= CLASS2;
	} else {
		ndlp->nlp_fcp_info |= CLASS3;
	}
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	/* no need to reg_login if we are already in one of these states */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
		return 1;
	}

	if ((phba->fc_flag & FC_PT2PT)
	    && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		phba->fc_myDID = icmd->un.rcvels.parmRo;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox == NULL)
			goto out;
		lpfc_config_link(phba, mbox);
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox
			(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto out;
		}

		lpfc_can_disctmo(phba);
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		goto out;

	if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
			   (uint8_t *) sp, mbox, 0)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this mbox command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	mbox->context2 = ndlp;
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI.
	 * If the pending PLOGI is not canceled here, the PLOGI will be
	 * rejected by the remote port and retried.  On a configuration with
	 * a single discovery thread, this causes a huge delay in discovery.
	 * It also causes multiple state machines to run in parallel for
	 * this node.
	 */
	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp, 1);
	}

	lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
	return 1;

out:
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	return 0;
}

static int
lpfc_rcv_padisc(struct lpfc_hba * phba,
		struct lpfc_nodelist * ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if ((icmd->ulpStatus == 0) &&
	    (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
					 NULL, 0);
		}
		return 1;
	}
	/* Reject this request because of invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);

	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	return 0;
}

static int
lpfc_rcv_logo(struct lpfc_hba * phba,
	      struct lpfc_nodelist * ndlp,
	      struct lpfc_iocbq *cmdiocb,
	      uint32_t els_cmd)
{
	/* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for the first LOGO; this avoids sending
	 * unnecessary PLOGIs during LOGO storms from a device.
	 */
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
	else
		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	if (!(ndlp->nlp_type & NLP_FABRIC) ||
	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(phba->host->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
		ndlp->nlp_prev_state = ndlp->nlp_state;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	} else {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		ndlp->nlp_state = NLP_STE_UNUSED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
	}

	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(phba->host->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in the
	 * lpfc_cmpl_els_logo_acc routine.  Since part of processing includes
	 * an unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}

static void
lpfc_rcv_prli(struct lpfc_hba * phba,
	      struct lpfc_nodelist * ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;
		fc_remote_port_rolechg(rport, roles);
	}
}

static uint32_t
lpfc_disc_set_adisc(struct lpfc_hba * phba,
		    struct lpfc_nodelist * ndlp)
{
	/* Check config parameter use-adisc or FCP-2 */
	if ((phba->cfg_use_adisc == 0) &&
	    !(phba->fc_flag & FC_RSCN_MODE)) {
		if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
			return 0;
	}
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag |= NLP_NPR_ADISC;
	spin_unlock_irq(phba->host->host_lock);
	return 1;
}

static uint32_t
lpfc_disc_illegal(struct lpfc_hba * phba,
		  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_DISCOVERY,
			"%d:0253 Illegal State Transition: node x%x event x%x, "
			"state x%x Data: x%x x%x\n",
			phba->brd_no,
			ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			ndlp->nlp_flag);
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */

static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
		ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
		ndlp->nlp_state = NLP_STE_UNUSED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
		return ndlp->nlp_state;
	}
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(phba, ndlp, 0);
	lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(phba->host->host_lock);
	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
	lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	uint32_t *lp;
	struct ls_rjt stat;
	int port_cmp;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&phba->fc_portname, &sp->portName,
			  sizeof (struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
	} else {
		lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	}	/* if our portname was less */

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp, 1);

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp, 1);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
	} else {
		lpfc_issue_els_logo(phba, ndlp, 0);
	}

	/* Put ndlp in npr list set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list,
			      struct lpfc_dmabuf,
			      list);
	lp = (uint32_t *) prsp->virt;

	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
		goto out;

	/* PLOGI chkparm OK */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_ELS,
			"%d:0121 PLOGI chkparm OK "
			"Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			ndlp->nlp_DID, ndlp->nlp_state,
			ndlp->nlp_flag, ndlp->nlp_rpi);

	if ((phba->cfg_fcp_class == 2) &&
	    (sp->cls2.classValid)) {
		ndlp->nlp_fcp_info |= CLASS2;
	} else {
		ndlp->nlp_fcp_info |= CLASS3;
	}
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
		sp->cmn.bbRcvSizeLsb;

	if (!(mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)))
		goto out;

	lpfc_unreg_rpi(phba, ndlp);
	if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID,
			   (uint8_t *) sp, mbox, 0) == 0) {
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->context2 = ndlp;
		if (lpfc_sli_issue_mbox(phba, mbox,
					(MBX_NOWAIT | MBX_STOP_IOCB))
		    != MBX_NOT_FINISHED) {
			ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
			return ndlp->nlp_state;
		}
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);
	}


out:
	/* Free this node since the driver cannot login or has the wrong
	   sparm */
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp, 1);

		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
			      struct lpfc_nodelist * ndlp, void *arg,
			      uint32_t evt)
{
	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp, 1);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(phba->host->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg,
			   uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp, 1);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
	lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg,
			  uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg,
			  uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp, 0);

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg,
			  uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(phba->host->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		lpfc_unreg_rpi(phba, ndlp);
		return ndlp->nlp_state;
	}

	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		ndlp->nlp_state = NLP_STE_MAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg,
			   uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding ADISC */
		lpfc_els_abort(phba, ndlp, 1);

		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
			      struct lpfc_nodelist * ndlp, void *arg,
			      uint32_t evt)
{
	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp, 1);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	ndlp->nlp_flag |= NLP_NPR_ADISC;
	spin_unlock_irq(phba->host->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
			      struct lpfc_nodelist * ndlp, void *arg,
			      uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
			     struct lpfc_nodelist * ndlp, void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
			     struct lpfc_nodelist * ndlp, void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
			       struct lpfc_nodelist * ndlp, void *arg,
			       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
			     struct lpfc_nodelist * ndlp, void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
				  struct lpfc_nodelist * ndlp,
				  void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	uint32_t did;

	pmb = (LPFC_MBOXQ_t *) arg;
	mb = &pmb->mb;
	did = mb->un.varWords[1];
	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0246 RegLogin failed Data: x%x x%x x%x\n",
				phba->brd_no,
				did, mb->mbxStatus, phba->hba_state);

		/*
		 * If RegLogin failed due to lack of HBA resources, do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			ndlp->nlp_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr list set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(phba->host->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(phba, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		return ndlp->nlp_state;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
		lpfc_issue_els_prli(phba, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
			      struct lpfc_nodelist * ndlp, void *arg,
			      uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
				 struct lpfc_nodelist * ndlp, void *arg,
				 uint32_t evt)
{
	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Software abort outstanding PRLI before sending acc */
	lpfc_els_abort(phba, ndlp, 1);

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

/* This routine is invoked when we rcv a PRLO request from a nport
 * we are logged into.  We should send back a PRLO rsp setting the
 * appropriate bits.
 * NEXT STATE = PRLI_ISSUE
 */
static uint32_t
lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	PRLI *npr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
		return ndlp->nlp_state;
	}

	/* Check out PRLI rsp */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	ndlp->nlp_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
	return ndlp->nlp_state;
}

/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    This routine is invoked when we receive a request to remove a nport
 *    we are in the process of PRLIing.  We should software abort the
 *    outstanding PRLI, unreg login, and send a logout.  We will change the
 *    node state to UNUSED_NODE and put it on the plogi list so it can be
 *    freed when the LOGO completes.
 *
 */
static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PRLI */
		lpfc_els_abort(phba, ndlp, 1);

		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
}


/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    This routine is invoked when the state of a device is unknown, like
 *    during a link down.  We should remove the nodelist entry from the
 *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
 *    outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
			     struct lpfc_nodelist * ndlp, void *arg,
			     uint32_t evt)
{
	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp, 1);

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_prli(phba, ndlp, cmdiocb);
	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
			     struct lpfc_nodelist * ndlp, void *arg,
			     uint32_t evt)
{
	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	lpfc_disc_set_adisc(phba, ndlp);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	spin_lock_irq(phba->host->host_lock);
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	spin_unlock_irq(phba->host->host_lock);

	/* Treat like rcv logo */
	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
			      struct lpfc_nodelist * ndlp, void *arg,
			      uint32_t evt)
{
	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	ndlp->nlp_state = NLP_STE_NPR_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_disc_set_adisc(phba, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg,
			uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & NLP_LOGO_SND) {
		return ndlp->nlp_state;
	}

	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(phba->host->host_lock);
		return ndlp->nlp_state;
	}

	/* send PLOGI immediately, move to PLOGI issue state */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
		ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
		lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
		lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
	}

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
		       struct lpfc_nodelist * ndlp, void *arg,
		       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;
	struct ls_rjt stat;

	cmdiocb = (struct lpfc_iocbq *) arg;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(phba->host->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(phba->host->host_lock);
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
			lpfc_issue_els_adisc(phba, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
			lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
		}

	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
		       struct lpfc_nodelist * ndlp, void *arg,
		       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg,
			 uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(phba, ndlp, cmdiocb);

	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery is already in progress for this node.  Starting
	 * discovery here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
			lpfc_issue_els_adisc(phba, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
			lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
		       struct lpfc_nodelist * ndlp, void *arg,
		       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(phba->host->host_lock);

	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(phba->host->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(phba->host->host_lock);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	lpfc_unreg_rpi(phba, ndlp);
	/* This routine does nothing, just return the current state */
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
			 struct lpfc_nodelist * ndlp, void *arg,
			 uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp, void *arg,
			    uint32_t evt)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;

	pmb = (LPFC_MBOXQ_t *) arg;
	mb = &pmb->mb;

	if (!mb->mbxStatus)
		ndlp->nlp_rpi = mb->un.varWords[0];
	else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_npr_node(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg,
			uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	}
	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_npr_node(struct lpfc_hba * phba,
			   struct lpfc_nodelist * ndlp, void *arg,
			   uint32_t evt)
{
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(phba->host->host_lock);
	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}
	return ndlp->nlp_state;
}


/* This next section defines the NPort Discovery State Machine */

/* There are 4 different doubly linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed.  Each list holds the nodes that we will send PLOGI
 * or ADISC on.  These lists keep track of what nodes will be affected
 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level.  The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on.  This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list.  For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list.  For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state).  Once on the unmapped list, a PRLI is issued and
 * the state changed to PRLI_ISSUE.  When the PRLI completion occurs, the
 * state is changed to UNMAPPED_NODE.  If the completion indicates a mapped
 * node, the node is taken off the unmapped list.  The binding list is
 * checked for a valid binding, or a binding is automatically assigned.
 * If binding assignment is unsuccessful, the node is left on the unmapped
 * list.  If binding assignment is successful, the associated binding list
 * entry (if any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event.  If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
 * the PLOGI or ADISC lists.  Once the PLOGI and ADISC lists are populated,
 * we will first process the ADISC list.  32 entries are processed initially
 * and ADISC is initiated for each one.  Completions / Events for each node are
 * funnelled thru the state machine.  As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing.  If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done.  For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and reenable Link Events.  Next we will process the
 * PLOGI list.  32 entries are processed initially and PLOGI is initiated for
 * each one.  Completions / Events for each node are funnelled thru the state
 * machine.  As each node finishes PLOGI processing, it starts PLOGI for any
 * nodes waiting for PLOGI processing.  If no nodes are waiting, and the PLOGI
 * list count is identically 0, then we are done.  We have now completed
 * discovery / RSCN handling.  Upon completion, ALL nodes should be on either
 * the mapped or unmapped lists.
 */

static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine		Event	    Current State     */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};

int
lpfc_disc_state_machine(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
			 uint32_t);

	ndlp->nlp_disc_refcnt++;
	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0211 DSM in event x%x on NPort x%x in state %d "
			"Data: x%x\n",
			phba->brd_no,
			evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (phba, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
			phba->brd_no,
			rc, ndlp->nlp_DID, ndlp->nlp_flag);

	ndlp->nlp_disc_refcnt--;

	/* Check to see if ndlp removal is deferred */
	if ((ndlp->nlp_disc_refcnt == 0)
	    && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_remove(phba, ndlp);
		return NLP_STE_FREED_NODE;
	}
	if (rc == NLP_STE_FREED_NODE)
		return NLP_STE_FREED_NODE;
	return rc;
}
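
/*
 * Editorial sketch (not part of the driver): the other discovery paths in
 * the lpfc module are assumed to feed events into the jump table above via
 * lpfc_disc_state_machine(), for example when an unsolicited PLOGI arrives
 * for a known node:
 *
 *	rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * The routine indexes lpfc_disc_action[] with
 * (ndlp->nlp_state * NLP_EVT_MAX_EVENT) + evt, and a return value of
 * NLP_STE_FREED_NODE tells the caller that ndlp has been freed and must not
 * be referenced again.
 */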