/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register.
 * If there are any host link attention events during this @vport's
 * discovery process, the @vport shall be marked as FC_ABORT_DISCOVERY, a
 * host link attention clear shall be issued if the link state is not
 * already in host link cleared state, and a return code shall indicate
 * whether the host link attention event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is in LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
{
	struct fc_els_ls_acc *rsp = buf->virt;

	if (rsp && rsp->la_cmd == ELS_LS_ACC)
		return true;
	return false;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by the
 * individual discovery state machine routines after calling this routine
 * to allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * ndlp of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if (did == Fabric_DID &&
	    test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
	    (elscmd == ELS_CMD_FLOGI ||
	     elscmd == ELS_CMD_FDISC ||
	     elscmd == ELS_CMD_LOGO))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->cmd_dmabuf = pcmd;
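	/* The dmabufs saved in the iocb are released by the ELS free
	 * path once the command completes or fails.
	 */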
	elsiocb->bpl_dmabuf = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%lx\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%lx\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_free_mbox;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_free_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !test_bit(FC_PT2PT, &vport->fc_flag)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if (test_bit(FC_FABRIC, &vport->fc_flag) ||
	    test_bit(FC_PT2PT, &vport->fc_flag)) {
		rc = lpfc_mbox_rsrc_prep(phba, mboxq);
		if (rc) {
			rc = -ENOMEM;
			goto fail_mbox;
		}
		dmabuf = mboxq->ctx_buf;
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf) {
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
		/* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
		mboxq->ctx_buf = dmabuf;
	} else {
		lpfc_reg_vfi(mboxq, vport, 0);
	}

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_mbox;
	}
	return 0;

fail_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
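 *
 * On success the FC_VFI_REGISTERED flag is cleared for the @vport.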
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handlers.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery))
		set_bit(FC_DISC_DELAYED, &vport->fc_flag);

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology.
 * It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	set_bit(FC_FABRIC, &vport->fc_flag);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
		set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !test_bit(NLP_NPR_ADISC, &np->nlp_flag))
				continue;
			clear_bit(NLP_NPR_ADISC, &np->nlp_flag);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	clear_bit(FC_FABRIC, &vport->fc_flag);
	clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
	set_bit(FC_PT2PT, &vport->fc_flag);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);
		clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error is reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;
	bool flogi_in_retry = false;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					ulp_status, ulp_word4, tmo);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
				      "2858 FLOGI Status:x%x/x%x TMO"
				      ":x%x Data x%lx x%x\n",
				      ulp_status, ulp_word4, tmo,
				      phba->hba_flag, phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* Address a timing race with dev_loss. If dev_loss
			 * is active on this FPort node, put the initial ref
			 * count back to stop premature node release actions.
			 */
			lpfc_check_nlp_post_devloss(vport, ndlp);
			flogi_in_retry = true;
			goto out;
		}

		/* The FLOGI will not be retried. If the FPort node is not
		 * registered with the SCSI transport, remove the initial
		 * reference to trigger node release.
		 */
		if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
				 "0150 FLOGI Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
				 tmo, kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE))) {
			/* Warn FLOGI status */
			lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
				      "0100 FLOGI Status:x%x/x%x "
				      "TMO:x%x\n",
				      ulp_status, ulp_word4, tmo);
			goto flogifail;
		}

		/* FLOGI failed, so there is no fabric */
		clear_bit(FC_FABRIC, &vport->fc_flag);
		clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
		clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					clear_bit(FC_VFI_REGISTERED,
						  &vport->fc_flag);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			goto out;
		}
		goto flogifail;
	}
	clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
	clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);

	/*
	 * The FLOGI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	if (!lpfc_is_els_acc_rsp(prsp))
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	/* reinitialize the VMID datastructure before returning */
	if (lpfc_is_vmid_enabled(phba)) {
		lpfc_reinit_vmid(vport);
		vport->vmid_flag = 0;
	}
	if (sp->cmn.priority_tagging)
		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
						  LPFC_VMID_TYPE_PRIO);

	/*
	 * Address a timing race with dev_loss. If dev_loss is active on
	 * this FPort node, put the initial ref count back to stop premature
	 * node release actions.
	 */
	lpfc_check_nlp_post_devloss(vport, ndlp);
	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			spin_unlock_irq(&phba->hbalock);
			clear_bit(FCF_RR_INPROG, &phba->hba_flag);
			clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag))
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			spin_unlock_irq(&phba->hbalock);
			clear_bit(FCF_RR_INPROG, &phba->hba_flag);
			clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   test_bit(FC_PT2PT, &vport->fc_flag)) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	if (!flogi_in_retry)
		clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);

	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x iotag x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag, cmdiocb->iotag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
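 *
 * If a deferred FLOGI ACC is pending, it is transmitted after the FLOGI
 * request has been queued.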
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
				sizeof(vport->lpfc_vmid_host_uuid))) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = SLI4_CT_FCFI;
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	/* Avoid race with FLOGI completion and hba_flags. */
	set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
	set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
		clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc.flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc.rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc.ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc.rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc.ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%lx\n",
				 phba->defer_flogi_acc.rx_id,
				 phba->defer_flogi_acc.ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc.flag = false;

		/* Decrement the held ndlp that was incremented when the
		 * deferred flogi acc flag was set.
		 */
		if (phba->defer_flogi_acc.ndlp) {
			lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
			phba->defer_flogi_acc.ndlp = NULL;
		}

		vport->fc_myDID = did;
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
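 *
 * The ELS ring transmit-complete queue is walked under the hbalock while
 * the abort requests are issued.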
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	u32 ulp_command;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		ulp_command = get_job_cmnd(phba, iocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			ndlp = iocb->ndlp;
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
				    !test_bit(FC_PT2PT_PLOGI,
					      &phba->pport->fc_flag))
					iocb->fabric_cmd_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Reset the Fabric flag, topology change may have happened */
	clear_bit(FC_FABRIC, &vport->fc_flag);
	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified.
 * It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%lx x%x\n",
			 vport->num_disc_nodes,
			 atomic_read(&vport->fc_plogi_cnt),
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (test_bit(FC_NLP_MORE, &vport->fc_flag))
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned.
 * If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0;
	int rc;
	unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	sp = (struct serv_parm *)((uint8_t *)prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (new_ndlp == ndlp)
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%lx x%x: "
			 "new_ndlp x%x x%lx x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
	} else {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);

		/*
		 * Unregister from backend if not done yet. Could have been
Could have been 1735 * skipped due to ADISC 1736 */ 1737 lpfc_nlp_unreg_node(vport, new_ndlp); 1738 } 1739 1740 keepDID = new_ndlp->nlp_DID; 1741 1742 /* At this point in this routine, we know new_ndlp will be 1743 * returned. however, any previous GID_FTs that were done 1744 * would have updated nlp_fc4_type in ndlp, so we must ensure 1745 * new_ndlp has the right value. 1746 */ 1747 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 1748 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1749 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1750 } 1751 1752 lpfc_unreg_rpi(vport, new_ndlp); 1753 new_ndlp->nlp_DID = ndlp->nlp_DID; 1754 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1755 if (phba->sli_rev == LPFC_SLI_REV4) 1756 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1757 ndlp->active_rrqs_xri_bitmap, 1758 phba->cfg_rrq_xri_bitmap_sz); 1759 1760 /* Lock both ndlps */ 1761 spin_lock_irq(&ndlp->lock); 1762 spin_lock_irq(&new_ndlp->lock); 1763 keep_new_nlp_flag = new_ndlp->nlp_flag; 1764 keep_nlp_flag = ndlp->nlp_flag; 1765 new_ndlp->nlp_flag = ndlp->nlp_flag; 1766 1767 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1768 if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) 1769 set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); 1770 else 1771 clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); 1772 1773 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1774 if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) 1775 set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); 1776 else 1777 clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); 1778 1779 /* 1780 * Retain the DROPPED flag. This will take care of the init 1781 * refcount when affecting the state change 1782 */ 1783 if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) 1784 set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); 1785 else 1786 clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); 1787 1788 ndlp->nlp_flag = keep_new_nlp_flag; 1789 1790 /* if ndlp had NLP_UNREG_INP set, keep it */ 1791 if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) 1792 set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 1793 else 1794 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 1795 1796 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1797 if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) 1798 set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); 1799 else 1800 clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); 1801 1802 /* 1803 * Retain the DROPPED flag. This will take care of the init 1804 * refcount when affecting the state change 1805 */ 1806 if (test_bit(NLP_DROPPED, &keep_nlp_flag)) 1807 set_bit(NLP_DROPPED, &ndlp->nlp_flag); 1808 else 1809 clear_bit(NLP_DROPPED, &ndlp->nlp_flag); 1810 1811 spin_unlock_irq(&new_ndlp->lock); 1812 spin_unlock_irq(&ndlp->lock); 1813 1814 /* Set nlp_states accordingly */ 1815 keep_nlp_state = new_ndlp->nlp_state; 1816 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1817 1818 /* interchange the nvme remoteport structs */ 1819 keep_nrport = new_ndlp->nrport; 1820 new_ndlp->nrport = ndlp->nrport; 1821 1822 /* Move this back to NPR state */ 1823 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1824 /* The ndlp doesn't have a portname yet, but does have an 1825 * NPort ID. The new_ndlp portname matches the Rport's 1826 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1827 */ 1828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1829 "3179 PLOGI confirm NEW: %x %x\n", 1830 new_ndlp->nlp_DID, keepDID); 1831 1832 /* Two ndlps cannot have the same did on the nodelist. 1833 * The KeepDID and keep_nlp_fc4_type need to be swapped 1834 * because ndlp is inflight with no WWPN. 
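 * The assignments that follow give the inflight ndlp the keepDID and the
 * saved fc4 type, restore its saved nlp_state, and, on SLI4, copy back the
 * RRQ XRI bitmap that was saved from new_ndlp earlier in this routine.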
1835 */ 1836 ndlp->nlp_DID = keepDID; 1837 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1838 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1839 if (phba->sli_rev == LPFC_SLI_REV4 && 1840 active_rrqs_xri_bitmap) 1841 memcpy(ndlp->active_rrqs_xri_bitmap, 1842 active_rrqs_xri_bitmap, 1843 phba->cfg_rrq_xri_bitmap_sz); 1844 1845 } else { 1846 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1847 "3180 PLOGI confirm SWAP: %x %x\n", 1848 new_ndlp->nlp_DID, keepDID); 1849 1850 lpfc_unreg_rpi(vport, ndlp); 1851 1852 /* The ndlp and new_ndlp both have WWPNs but are swapping 1853 * NPort Ids and attributes. 1854 */ 1855 ndlp->nlp_DID = keepDID; 1856 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1857 1858 if (phba->sli_rev == LPFC_SLI_REV4 && 1859 active_rrqs_xri_bitmap) 1860 memcpy(ndlp->active_rrqs_xri_bitmap, 1861 active_rrqs_xri_bitmap, 1862 phba->cfg_rrq_xri_bitmap_sz); 1863 1864 /* Since we are switching over to the new_ndlp, 1865 * reset the old ndlp state 1866 */ 1867 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1868 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1869 keep_nlp_state = NLP_STE_NPR_NODE; 1870 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1871 ndlp->nrport = keep_nrport; 1872 } 1873 1874 /* 1875 * If ndlp is not associated with any rport we can drop it here else 1876 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1877 */ 1878 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1879 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1880 1881 if (phba->sli_rev == LPFC_SLI_REV4 && 1882 active_rrqs_xri_bitmap) 1883 mempool_free(active_rrqs_xri_bitmap, 1884 phba->active_rrq_pool); 1885 1886 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1887 "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", 1888 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1889 new_ndlp->nlp_fc4_type); 1890 1891 return new_ndlp; 1892 } 1893 1894 /** 1895 * lpfc_end_rscn - Check and handle more rscn for a vport 1896 * @vport: pointer to a host virtual N_Port data structure. 1897 * 1898 * This routine checks whether more Registration State Change 1899 * Notifications (RSCNs) came in while the discovery state machine was in 1900 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1901 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1902 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1903 * handling the RSCNs. 1904 **/ 1905 void 1906 lpfc_end_rscn(struct lpfc_vport *vport) 1907 { 1908 1909 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 1910 /* 1911 * Check to see if more RSCNs came in while we were 1912 * processing this one. 1913 */ 1914 if (vport->fc_rscn_id_cnt || 1915 test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 1916 lpfc_els_handle_rscn(vport); 1917 else 1918 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 1919 } 1920 } 1921 1922 /** 1923 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1924 * @phba: pointer to lpfc hba data structure. 1925 * @cmdiocb: pointer to lpfc command iocb data structure. 1926 * @rspiocb: pointer to lpfc response iocb data structure. 1927 * 1928 * This routine will call the clear rrq function to free the rrq and 1929 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1930 * exist then the clear_rrq is still called because the rrq needs to 1931 * be freed. 
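 *
 * In either case the routine releases the command IOCB and drops the ndlp
 * reference that was taken when the RRQ was issued.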
1932 **/
1933
1934 static void
1935 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1936 struct lpfc_iocbq *rspiocb)
1937 {
1938 struct lpfc_vport *vport = cmdiocb->vport;
1939 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1940 struct lpfc_node_rrq *rrq;
1941 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1942 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1943
1944 /* we pass cmdiocb to state machine which needs rspiocb as well */
1945 rrq = cmdiocb->context_un.rrq;
1946 cmdiocb->rsp_iocb = rspiocb;
1947
1948 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1949 "RRQ cmpl: status:x%x/x%x did:x%x",
1950 ulp_status, ulp_word4,
1951 get_job_els_rsp64_did(phba, cmdiocb));
1952
1953
1954 /* rrq completes to NPort <nlp_DID> */
1955 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1956 "2880 RRQ completes to DID x%x "
1957 "Data: x%x x%x x%x x%x x%x\n",
1958 ndlp->nlp_DID, ulp_status, ulp_word4,
1959 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1960
1961 if (ulp_status) {
1962 /* Check for retry */
1963 /* Warn RRQ status Don't print the vport to vport rjts */
1964 if (ulp_status != IOSTAT_LS_RJT ||
1965 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1966 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1967 (phba)->pport->cfg_log_verbose & LOG_ELS)
1968 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
1969 "2881 RRQ DID:%06X Status:"
1970 "x%x/x%x\n",
1971 ndlp->nlp_DID, ulp_status,
1972 ulp_word4);
1973 }
1974
1975 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1976 lpfc_els_free_iocb(phba, cmdiocb);
1977 lpfc_nlp_put(ndlp);
1978 return;
1979 }
1980 /**
1981 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1982 * @phba: pointer to lpfc hba data structure.
1983 * @cmdiocb: pointer to lpfc command iocb data structure.
1984 * @rspiocb: pointer to lpfc response iocb data structure.
1985 *
1986 * This routine is the completion callback function for issuing the Port
1987 * Login (PLOGI) command. For PLOGI completion, there must be an active
1988 * ndlp on the vport node list that matches the remote node ID from the
1989 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1990 * ignored and the command IOCB is released. The PLOGI response IOCB status
1991 * is checked for error conditions. If there is error status reported, PLOGI
1992 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1993 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1994 * the ndlp and the NLP_EVT_CMPL_PLOGI event is posted to the Discover State
1995 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1996 * there are additional N_Port nodes with the vport that need to perform
1997 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1998 * PLOGIs.
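 *
 * When the completion is part of an active discovery, lpfc_more_plogi() is
 * called to issue any remaining PLOGIs and, once no discovery nodes are
 * left, FC_NDISC_ACTIVE is cleared and any pending RSCN handling is ended.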
1999 **/ 2000 static void 2001 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2002 struct lpfc_iocbq *rspiocb) 2003 { 2004 struct lpfc_vport *vport = cmdiocb->vport; 2005 IOCB_t *irsp; 2006 struct lpfc_nodelist *ndlp, *free_ndlp; 2007 struct lpfc_dmabuf *prsp; 2008 bool disc; 2009 struct serv_parm *sp = NULL; 2010 u32 ulp_status, ulp_word4, did, iotag; 2011 bool release_node = false; 2012 2013 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2014 cmdiocb->rsp_iocb = rspiocb; 2015 2016 ulp_status = get_job_ulpstatus(phba, rspiocb); 2017 ulp_word4 = get_job_word4(phba, rspiocb); 2018 did = get_job_els_rsp64_did(phba, cmdiocb); 2019 2020 if (phba->sli_rev == LPFC_SLI_REV4) { 2021 iotag = get_wqe_reqtag(cmdiocb); 2022 } else { 2023 irsp = &rspiocb->iocb; 2024 iotag = irsp->ulpIoTag; 2025 } 2026 2027 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2028 "PLOGI cmpl: status:x%x/x%x did:x%x", 2029 ulp_status, ulp_word4, did); 2030 2031 ndlp = lpfc_findnode_did(vport, did); 2032 if (!ndlp) { 2033 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2034 "0136 PLOGI completes to NPort x%x " 2035 "with no ndlp. Data: x%x x%x x%x\n", 2036 did, ulp_status, ulp_word4, iotag); 2037 goto out_freeiocb; 2038 } 2039 2040 /* Since ndlp can be freed in the disc state machine, note if this node 2041 * is being used during discovery. 2042 */ 2043 disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2044 2045 /* PLOGI completes to NPort <nlp_DID> */ 2046 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2047 "0102 PLOGI completes to NPort x%06x " 2048 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2049 ndlp->nlp_DID, iotag, 2050 ndlp->nlp_fc4_type, 2051 ulp_status, ulp_word4, 2052 disc, vport->num_disc_nodes); 2053 2054 /* Check to see if link went down during discovery */ 2055 if (lpfc_els_chk_latt(vport)) { 2056 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2057 goto out; 2058 } 2059 2060 if (ulp_status) { 2061 /* Check for retry */ 2062 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2063 /* ELS command is being retried */ 2064 if (disc) 2065 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2066 goto out; 2067 } 2068 /* Warn PLOGI status Don't print the vport to vport rjts */ 2069 if (ulp_status != IOSTAT_LS_RJT || 2070 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2071 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2072 (phba)->pport->cfg_log_verbose & LOG_ELS) 2073 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 2074 "2753 PLOGI DID:%06X " 2075 "Status:x%x/x%x\n", 2076 ndlp->nlp_DID, ulp_status, 2077 ulp_word4); 2078 2079 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2080 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2081 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2082 NLP_EVT_CMPL_PLOGI); 2083 2084 /* If a PLOGI collision occurred, the node needs to continue 2085 * with the reglogin process. 2086 */ 2087 spin_lock_irq(&ndlp->lock); 2088 if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || 2089 test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && 2090 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2091 spin_unlock_irq(&ndlp->lock); 2092 goto out; 2093 } 2094 2095 /* No PLOGI collision and the node is not registered with the 2096 * scsi or nvme transport. It is no longer an active node. Just 2097 * start the device remove process. 
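 * The ndlp lock is held only while the transport registration and
 * dev-loss flags are sampled; the NLP_EVT_DEVICE_RM event is posted
 * after the lock is released.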
2098 */ 2099 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2100 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2101 if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) 2102 release_node = true; 2103 } 2104 spin_unlock_irq(&ndlp->lock); 2105 2106 if (release_node) 2107 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2108 NLP_EVT_DEVICE_RM); 2109 } else { 2110 /* Good status, call state machine */ 2111 prsp = list_get_first(&cmdiocb->cmd_dmabuf->list, 2112 struct lpfc_dmabuf, list); 2113 if (!prsp) 2114 goto out; 2115 if (!lpfc_is_els_acc_rsp(prsp)) 2116 goto out; 2117 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2118 2119 sp = (struct serv_parm *)((u8 *)prsp->virt + 2120 sizeof(u32)); 2121 2122 ndlp->vmid_support = 0; 2123 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2124 (phba->cfg_vmid_priority_tagging && 2125 sp->cmn.priority_tagging)) { 2126 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2127 "4018 app_hdr_support %d tagging %d DID x%x\n", 2128 sp->cmn.app_hdr_support, 2129 sp->cmn.priority_tagging, 2130 ndlp->nlp_DID); 2131 /* if the dest port supports VMID, mark it in ndlp */ 2132 ndlp->vmid_support = 1; 2133 } 2134 2135 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2136 NLP_EVT_CMPL_PLOGI); 2137 } 2138 2139 if (disc && vport->num_disc_nodes) { 2140 /* Check to see if there are more PLOGIs to be sent */ 2141 lpfc_more_plogi(vport); 2142 2143 if (vport->num_disc_nodes == 0) { 2144 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2145 2146 lpfc_can_disctmo(vport); 2147 lpfc_end_rscn(vport); 2148 } 2149 } 2150 2151 out: 2152 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2153 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2154 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2155 2156 out_freeiocb: 2157 /* Release the reference on the original I/O request. */ 2158 free_ndlp = cmdiocb->ndlp; 2159 2160 lpfc_els_free_iocb(phba, cmdiocb); 2161 lpfc_nlp_put(free_ndlp); 2162 return; 2163 } 2164 2165 /** 2166 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2167 * @vport: pointer to a host virtual N_Port data structure. 2168 * @did: destination port identifier. 2169 * @retry: number of retries to the command IOCB. 2170 * 2171 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2172 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2173 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2174 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2175 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2176 * 2177 * Note that the ndlp reference count will be incremented by 1 for holding 2178 * the ndlp and the reference to ndlp will be stored into the ndlp field 2179 * of the IOCB for the completion callback function to the PLOGI ELS command. 2180 * 2181 * Return code 2182 * 0 - Successfully issued a plogi for @vport 2183 * 1 - failed to issue a plogi for @vport 2184 **/ 2185 int 2186 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2187 { 2188 struct lpfc_hba *phba = vport->phba; 2189 struct serv_parm *sp; 2190 struct lpfc_nodelist *ndlp; 2191 struct lpfc_iocbq *elsiocb; 2192 uint8_t *pcmd; 2193 uint16_t cmdsize; 2194 int ret; 2195 2196 ndlp = lpfc_findnode_did(vport, did); 2197 if (!ndlp) 2198 return 1; 2199 2200 /* Defer the processing of the issue PLOGI until after the 2201 * outstanding UNREG_RPI mbox command completes, unless we 2202 * are going offline. 
This logic does not apply for Fabric DIDs 2203 */ 2204 if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || 2205 test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && 2206 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2207 !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { 2208 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2209 "4110 Issue PLOGI x%x deferred " 2210 "on NPort x%x rpi x%x flg x%lx Data:" 2211 " x%px\n", 2212 ndlp->nlp_defer_did, ndlp->nlp_DID, 2213 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2214 2215 /* We can only defer 1st PLOGI */ 2216 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2217 ndlp->nlp_defer_did = did; 2218 return 0; 2219 } 2220 2221 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2222 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2223 ELS_CMD_PLOGI); 2224 if (!elsiocb) 2225 return 1; 2226 2227 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2228 2229 /* For PLOGI request, remainder of payload is service parameters */ 2230 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2231 pcmd += sizeof(uint32_t); 2232 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2233 sp = (struct serv_parm *) pcmd; 2234 2235 /* 2236 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2237 * to device on remote loops work. 2238 */ 2239 if (test_bit(FC_FABRIC, &vport->fc_flag) && 2240 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 2241 sp->cmn.altBbCredit = 1; 2242 2243 if (sp->cmn.fcphLow < FC_PH_4_3) 2244 sp->cmn.fcphLow = FC_PH_4_3; 2245 2246 if (sp->cmn.fcphHigh < FC_PH3) 2247 sp->cmn.fcphHigh = FC_PH3; 2248 2249 sp->cmn.valid_vendor_ver_level = 0; 2250 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2251 sp->cmn.bbRcvSizeMsb &= 0xF; 2252 2253 /* Check if the destination port supports VMID */ 2254 ndlp->vmid_support = 0; 2255 if (vport->vmid_priority_tagging) 2256 sp->cmn.priority_tagging = 1; 2257 else if (phba->cfg_vmid_app_header && 2258 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2259 sp->cmn.app_hdr_support = 1; 2260 2261 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2262 "Issue PLOGI: did:x%x", 2263 did, 0, 0); 2264 2265 /* If our firmware supports this feature, convey that 2266 * information to the target using the vendor specific field. 2267 */ 2268 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2269 sp->cmn.valid_vendor_ver_level = 1; 2270 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2271 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2272 } 2273 2274 phba->fc_stat.elsXmitPLOGI++; 2275 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2276 2277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2278 "Issue PLOGI: did:x%x refcnt %d", 2279 did, kref_read(&ndlp->kref), 0); 2280 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2281 if (!elsiocb->ndlp) { 2282 lpfc_els_free_iocb(phba, elsiocb); 2283 return 1; 2284 } 2285 2286 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2287 if (ret) { 2288 lpfc_els_free_iocb(phba, elsiocb); 2289 lpfc_nlp_put(ndlp); 2290 return 1; 2291 } 2292 2293 return 0; 2294 } 2295 2296 /** 2297 * lpfc_cmpl_els_prli - Completion callback function for prli 2298 * @phba: pointer to lpfc hba data structure. 2299 * @cmdiocb: pointer to lpfc command iocb data structure. 2300 * @rspiocb: pointer to lpfc response iocb data structure. 2301 * 2302 * This routine is the completion callback function for a Process Login 2303 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2304 * status. 
If there is error status reported, PRLI retry shall be attempted 2305 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2306 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2307 * ndlp to mark the PRLI completion. 2308 **/ 2309 static void 2310 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2311 struct lpfc_iocbq *rspiocb) 2312 { 2313 struct lpfc_vport *vport = cmdiocb->vport; 2314 struct lpfc_nodelist *ndlp; 2315 char *mode; 2316 u32 ulp_status; 2317 u32 ulp_word4; 2318 bool release_node = false; 2319 2320 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2321 cmdiocb->rsp_iocb = rspiocb; 2322 2323 ndlp = cmdiocb->ndlp; 2324 2325 ulp_status = get_job_ulpstatus(phba, rspiocb); 2326 ulp_word4 = get_job_word4(phba, rspiocb); 2327 2328 clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); 2329 2330 /* Driver supports multiple FC4 types. Counters matter. */ 2331 spin_lock_irq(&ndlp->lock); 2332 vport->fc_prli_sent--; 2333 ndlp->fc4_prli_sent--; 2334 spin_unlock_irq(&ndlp->lock); 2335 2336 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2337 "PRLI cmpl: status:x%x/x%x did:x%x", 2338 ulp_status, ulp_word4, 2339 ndlp->nlp_DID); 2340 2341 /* PRLI completes to NPort <nlp_DID> */ 2342 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2343 "0103 PRLI completes to NPort x%06x " 2344 "Data: x%x x%x x%x x%x x%x\n", 2345 ndlp->nlp_DID, ulp_status, ulp_word4, 2346 vport->num_disc_nodes, ndlp->fc4_prli_sent, 2347 ndlp->fc4_xpt_flags); 2348 2349 /* Check to see if link went down during discovery */ 2350 if (lpfc_els_chk_latt(vport)) 2351 goto out; 2352 2353 if (ulp_status) { 2354 /* Check for retry */ 2355 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2356 /* ELS command is being retried */ 2357 goto out; 2358 } 2359 2360 /* If we don't send GFT_ID to Fabric, a PRLI error 2361 * could be expected. 2362 */ 2363 if (test_bit(FC_FABRIC, &vport->fc_flag) || 2364 vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) 2365 mode = KERN_WARNING; 2366 else 2367 mode = KERN_INFO; 2368 2369 /* Warn PRLI status */ 2370 lpfc_printf_vlog(vport, mode, LOG_ELS, 2371 "2754 PRLI DID:%06X Status:x%x/x%x, " 2372 "data: x%x x%x x%lx\n", 2373 ndlp->nlp_DID, ulp_status, 2374 ulp_word4, ndlp->nlp_state, 2375 ndlp->fc4_prli_sent, ndlp->nlp_flag); 2376 2377 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2378 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2379 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2380 NLP_EVT_CMPL_PRLI); 2381 2382 /* The following condition catches an inflight transition 2383 * mismatch typically caused by an RSCN. Skip any 2384 * processing to allow recovery. 2385 */ 2386 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2387 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || 2388 (ndlp->nlp_state == NLP_STE_NPR_NODE && 2389 test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { 2390 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 2391 "2784 PRLI cmpl: Allow Node recovery " 2392 "DID x%06x nstate x%x nflag x%lx\n", 2393 ndlp->nlp_DID, ndlp->nlp_state, 2394 ndlp->nlp_flag); 2395 goto out; 2396 } 2397 2398 /* 2399 * For P2P topology, retain the node so that PLOGI can be 2400 * attempted on it again. 2401 */ 2402 if (test_bit(FC_PT2PT, &vport->fc_flag)) 2403 goto out; 2404 2405 /* As long as this node is not registered with the SCSI 2406 * or NVMe transport and no other PRLIs are outstanding, 2407 * it is no longer an active node. Otherwise devloss 2408 * handles the final cleanup. 
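 * As in the PLOGI completion path, the flags are sampled under the ndlp
 * lock and the NLP_EVT_DEVICE_RM event is posted, if needed, after the
 * lock is dropped.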
2409 */
2410 spin_lock_irq(&ndlp->lock);
2411 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2412 !ndlp->fc4_prli_sent) {
2413 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
2414 if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
2415 release_node = true;
2416 }
2417 spin_unlock_irq(&ndlp->lock);
2418
2419 if (release_node)
2420 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2421 NLP_EVT_DEVICE_RM);
2422 } else {
2423 /* Good status, call state machine. However, if another
2424 * PRLI is outstanding, don't call the state machine
2425 * because final disposition to Mapped or Unmapped is
2426 * completed there.
2427 */
2428 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2429 NLP_EVT_CMPL_PRLI);
2430 }
2431
2432 out:
2433 lpfc_els_free_iocb(phba, cmdiocb);
2434 lpfc_nlp_put(ndlp);
2435 return;
2436 }
2437
2438 /**
2439 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2440 * @vport: pointer to a host virtual N_Port data structure.
2441 * @ndlp: pointer to a node-list data structure.
2442 * @retry: number of retries to the command IOCB.
2443 *
2444 * This routine issues a Process Login (PRLI) ELS command for the
2445 * @vport. The PRLI service parameters are set up in the payload of the
2446 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2447 * is put into the IOCB completion callback func field before invoking the
2448 * routine lpfc_sli_issue_iocb() to send out the PRLI command.
2449 *
2450 * Note that the ndlp reference count will be incremented by 1 for holding the
2451 * ndlp and the reference to ndlp will be stored into the ndlp field of
2452 * the IOCB for the completion callback function to the PRLI ELS command.
2453 *
2454 * Return code
2455 * 0 - successfully issued prli iocb command for @vport
2456 * 1 - failed to issue prli iocb command for @vport
2457 **/
2458 int
2459 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2460 uint8_t retry)
2461 {
2462 int rc = 0;
2463 struct lpfc_hba *phba = vport->phba;
2464 PRLI *npr;
2465 struct lpfc_nvme_prli *npr_nvme;
2466 struct lpfc_iocbq *elsiocb;
2467 uint8_t *pcmd;
2468 uint16_t cmdsize;
2469 u32 local_nlp_type, elscmd;
2470
2471 /*
2472 * If we are in RSCN mode, the FC4 types supported from a
2473 * previous GFT_ID command may not be accurate. So, if we
2474 * are an NVME Initiator, always look for the possibility of
2475 * the remote NPort being an NVME Target.
2476 */
2477 if (phba->sli_rev == LPFC_SLI_REV4 &&
2478 test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
2479 vport->nvmei_support)
2480 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2481 local_nlp_type = ndlp->nlp_fc4_type;
2482
2483 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2484 * fields here before any of them can complete.
2485 */
2486 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2487 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2488 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2489 clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
2490 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
2491 ndlp->nvme_fb_size = 0;
2492
2493 send_next_prli:
2494 if (local_nlp_type & NLP_FC4_FCP) {
2495 /* Payload is 4 + 16 = 20 (0x14) bytes. */
2496 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2497 elscmd = ELS_CMD_PRLI;
2498 } else if (local_nlp_type & NLP_FC4_NVME) {
2499 /* Payload is 4 + 20 = 24 (0x18) bytes.
*/ 2500 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2501 elscmd = ELS_CMD_NVMEPRLI; 2502 } else { 2503 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2504 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2505 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2506 return 1; 2507 } 2508 2509 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2510 * FC4 type, implicitly LOGO. 2511 */ 2512 if (phba->sli_rev == LPFC_SLI_REV3 && 2513 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2514 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2515 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2516 ndlp->nlp_type); 2517 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2518 return 1; 2519 } 2520 2521 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2522 ndlp->nlp_DID, elscmd); 2523 if (!elsiocb) 2524 return 1; 2525 2526 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2527 2528 /* For PRLI request, remainder of payload is service parameters */ 2529 memset(pcmd, 0, cmdsize); 2530 2531 if (local_nlp_type & NLP_FC4_FCP) { 2532 /* Remainder of payload is FCP PRLI parameter page. 2533 * Note: this data structure is defined as 2534 * BE/LE in the structure definition so no 2535 * byte swap call is made. 2536 */ 2537 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2538 pcmd += sizeof(uint32_t); 2539 npr = (PRLI *)pcmd; 2540 2541 /* 2542 * If our firmware version is 3.20 or later, 2543 * set the following bits for FC-TAPE support. 2544 */ 2545 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2546 npr->ConfmComplAllowed = 1; 2547 npr->Retry = 1; 2548 npr->TaskRetryIdReq = 1; 2549 } 2550 npr->estabImagePair = 1; 2551 npr->readXferRdyDis = 1; 2552 if (vport->cfg_first_burst_size) 2553 npr->writeXferRdyDis = 1; 2554 2555 /* For FCP support */ 2556 npr->prliType = PRLI_FCP_TYPE; 2557 npr->initiatorFunc = 1; 2558 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2559 2560 /* Remove FCP type - processed. */ 2561 local_nlp_type &= ~NLP_FC4_FCP; 2562 } else if (local_nlp_type & NLP_FC4_NVME) { 2563 /* Remainder of payload is NVME PRLI parameter page. 2564 * This data structure is the newer definition that 2565 * uses bf macros so a byte swap is required. 2566 */ 2567 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2568 pcmd += sizeof(uint32_t); 2569 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2570 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2571 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2572 if (phba->nsler) { 2573 bf_set(prli_nsler, npr_nvme, 1); 2574 bf_set(prli_conf, npr_nvme, 1); 2575 } 2576 2577 /* Only initiators request first burst. */ 2578 if ((phba->cfg_nvme_enable_fb) && 2579 !phba->nvmet_support) 2580 bf_set(prli_fba, npr_nvme, 1); 2581 2582 if (phba->nvmet_support) { 2583 bf_set(prli_tgt, npr_nvme, 1); 2584 bf_set(prli_disc, npr_nvme, 1); 2585 } else { 2586 bf_set(prli_init, npr_nvme, 1); 2587 bf_set(prli_conf, npr_nvme, 1); 2588 } 2589 2590 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2591 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2592 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2593 2594 /* Remove NVME type - processed. 
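 * With this FC4 type cleared, the send_next_prli check at the bottom of
 * the routine loops back only while another requested type still remains.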
*/ 2595 local_nlp_type &= ~NLP_FC4_NVME; 2596 } 2597 2598 phba->fc_stat.elsXmitPRLI++; 2599 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2600 2601 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2602 "Issue PRLI: did:x%x refcnt %d", 2603 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2604 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2605 if (!elsiocb->ndlp) { 2606 lpfc_els_free_iocb(phba, elsiocb); 2607 return 1; 2608 } 2609 2610 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2611 if (rc == IOCB_ERROR) { 2612 lpfc_els_free_iocb(phba, elsiocb); 2613 lpfc_nlp_put(ndlp); 2614 return 1; 2615 } 2616 2617 /* The vport counters are used for lpfc_scan_finished, but 2618 * the ndlp is used to track outstanding PRLIs for different 2619 * FC4 types. 2620 */ 2621 set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); 2622 spin_lock_irq(&ndlp->lock); 2623 vport->fc_prli_sent++; 2624 ndlp->fc4_prli_sent++; 2625 spin_unlock_irq(&ndlp->lock); 2626 2627 /* The driver supports 2 FC4 types. Make sure 2628 * a PRLI is issued for all types before exiting. 2629 */ 2630 if (phba->sli_rev == LPFC_SLI_REV4 && 2631 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2632 goto send_next_prli; 2633 else 2634 return 0; 2635 } 2636 2637 /** 2638 * lpfc_rscn_disc - Perform rscn discovery for a vport 2639 * @vport: pointer to a host virtual N_Port data structure. 2640 * 2641 * This routine performs Registration State Change Notification (RSCN) 2642 * discovery for a @vport. If the @vport's node port recovery count is not 2643 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2644 * the nodes that need recovery. If none of the PLOGI were needed through 2645 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2646 * invoked to check and handle possible more RSCN came in during the period 2647 * of processing the current ones. 2648 **/ 2649 static void 2650 lpfc_rscn_disc(struct lpfc_vport *vport) 2651 { 2652 lpfc_can_disctmo(vport); 2653 2654 /* RSCN discovery */ 2655 /* go thru NPR nodes and issue ELS PLOGIs */ 2656 if (atomic_read(&vport->fc_npr_cnt)) 2657 if (lpfc_els_disc_plogi(vport)) 2658 return; 2659 2660 lpfc_end_rscn(vport); 2661 } 2662 2663 /** 2664 * lpfc_adisc_done - Complete the adisc phase of discovery 2665 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2666 * 2667 * This function is called when the final ADISC is completed during discovery. 2668 * This function handles clearing link attention or issuing reg_vpi depending 2669 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2670 * discovery. 2671 * This function is called with no locks held. 2672 **/ 2673 static void 2674 lpfc_adisc_done(struct lpfc_vport *vport) 2675 { 2676 struct lpfc_hba *phba = vport->phba; 2677 2678 /* 2679 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2680 * and continue discovery. 2681 */ 2682 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2683 !test_bit(FC_RSCN_MODE, &vport->fc_flag) && 2684 (phba->sli_rev < LPFC_SLI_REV4)) { 2685 2686 /* 2687 * If link is down, clear_la and reg_vpi will be done after 2688 * flogi following a link up event 2689 */ 2690 if (!lpfc_is_link_up(phba)) 2691 return; 2692 2693 /* The ADISCs are complete. Doesn't matter if they 2694 * succeeded or failed because the ADISC completion 2695 * routine guarantees to call the state machine and 2696 * the RPI is either unregistered (failed ADISC response) 2697 * or the RPI is still valid and the node is marked 2698 * mapped for a target. 
The exchanges should be in the 2699 * correct state. This code is specific to SLI3. 2700 */ 2701 lpfc_issue_clear_la(phba, vport); 2702 lpfc_issue_reg_vpi(phba, vport); 2703 return; 2704 } 2705 /* 2706 * For SLI2, we need to set port_state to READY 2707 * and continue discovery. 2708 */ 2709 if (vport->port_state < LPFC_VPORT_READY) { 2710 /* If we get here, there is nothing to ADISC */ 2711 lpfc_issue_clear_la(phba, vport); 2712 if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) { 2713 vport->num_disc_nodes = 0; 2714 /* go thru NPR list, issue ELS PLOGIs */ 2715 if (atomic_read(&vport->fc_npr_cnt)) 2716 lpfc_els_disc_plogi(vport); 2717 if (!vport->num_disc_nodes) { 2718 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2719 lpfc_can_disctmo(vport); 2720 lpfc_end_rscn(vport); 2721 } 2722 } 2723 vport->port_state = LPFC_VPORT_READY; 2724 } else 2725 lpfc_rscn_disc(vport); 2726 } 2727 2728 /** 2729 * lpfc_more_adisc - Issue more adisc as needed 2730 * @vport: pointer to a host virtual N_Port data structure. 2731 * 2732 * This routine determines whether there are more ndlps on a @vport 2733 * node list need to have Address Discover (ADISC) issued. If so, it will 2734 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2735 * remaining nodes which need to have ADISC sent. 2736 **/ 2737 void 2738 lpfc_more_adisc(struct lpfc_vport *vport) 2739 { 2740 if (vport->num_disc_nodes) 2741 vport->num_disc_nodes--; 2742 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2743 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2744 "0210 Continue discovery with %d ADISCs to go " 2745 "Data: x%x x%lx x%x\n", 2746 vport->num_disc_nodes, 2747 atomic_read(&vport->fc_adisc_cnt), 2748 vport->fc_flag, vport->port_state); 2749 /* Check to see if there are more ADISCs to be sent */ 2750 if (test_bit(FC_NLP_MORE, &vport->fc_flag)) { 2751 lpfc_set_disctmo(vport); 2752 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2753 lpfc_els_disc_adisc(vport); 2754 } 2755 if (!vport->num_disc_nodes) 2756 lpfc_adisc_done(vport); 2757 return; 2758 } 2759 2760 /** 2761 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2762 * @phba: pointer to lpfc hba data structure. 2763 * @cmdiocb: pointer to lpfc command iocb data structure. 2764 * @rspiocb: pointer to lpfc response iocb data structure. 2765 * 2766 * This routine is the completion function for issuing the Address Discover 2767 * (ADISC) command. It first checks to see whether link went down during 2768 * the discovery process. If so, the node will be marked as node port 2769 * recovery for issuing discover IOCB by the link attention handler and 2770 * exit. Otherwise, the response status is checked. If error was reported 2771 * in the response status, the ADISC command shall be retried by invoking 2772 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2773 * the response status, the state machine is invoked to set transition 2774 * with respect to NLP_EVT_CMPL_ADISC event. 
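 *
 * On error completions the node is also removed, via an NLP_EVT_DEVICE_RM
 * event, when it is not registered with either the SCSI or NVMe transport
 * and is not in dev-loss handling. Finally, if this node was part of
 * discovery and more discovery nodes remain, lpfc_more_adisc() is called
 * to keep the ADISC phase moving.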
2775 **/ 2776 static void 2777 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2778 struct lpfc_iocbq *rspiocb) 2779 { 2780 struct lpfc_vport *vport = cmdiocb->vport; 2781 IOCB_t *irsp; 2782 struct lpfc_nodelist *ndlp; 2783 bool disc; 2784 u32 ulp_status, ulp_word4, tmo, iotag; 2785 bool release_node = false; 2786 2787 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2788 cmdiocb->rsp_iocb = rspiocb; 2789 2790 ndlp = cmdiocb->ndlp; 2791 2792 ulp_status = get_job_ulpstatus(phba, rspiocb); 2793 ulp_word4 = get_job_word4(phba, rspiocb); 2794 2795 if (phba->sli_rev == LPFC_SLI_REV4) { 2796 tmo = get_wqe_tmo(cmdiocb); 2797 iotag = get_wqe_reqtag(cmdiocb); 2798 } else { 2799 irsp = &rspiocb->iocb; 2800 tmo = irsp->ulpTimeout; 2801 iotag = irsp->ulpIoTag; 2802 } 2803 2804 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2805 "ADISC cmpl: status:x%x/x%x did:x%x", 2806 ulp_status, ulp_word4, 2807 ndlp->nlp_DID); 2808 2809 /* Since ndlp can be freed in the disc state machine, note if this node 2810 * is being used during discovery. 2811 */ 2812 disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2813 clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); 2814 /* ADISC completes to NPort <nlp_DID> */ 2815 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2816 "0104 ADISC completes to NPort x%x " 2817 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2818 ndlp->nlp_DID, iotag, 2819 ulp_status, ulp_word4, 2820 tmo, disc, vport->num_disc_nodes); 2821 2822 /* Check to see if link went down during discovery */ 2823 if (lpfc_els_chk_latt(vport)) { 2824 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2825 goto out; 2826 } 2827 2828 if (ulp_status) { 2829 /* Check for retry */ 2830 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2831 /* ELS command is being retried */ 2832 if (disc) { 2833 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2834 lpfc_set_disctmo(vport); 2835 } 2836 goto out; 2837 } 2838 /* Warn ADISC status */ 2839 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 2840 "2755 ADISC DID:%06X Status:x%x/x%x\n", 2841 ndlp->nlp_DID, ulp_status, 2842 ulp_word4); 2843 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2844 NLP_EVT_CMPL_ADISC); 2845 2846 /* As long as this node is not registered with the SCSI or NVMe 2847 * transport, it is no longer an active node. Otherwise 2848 * devloss handles the final cleanup. 2849 */ 2850 spin_lock_irq(&ndlp->lock); 2851 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2852 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 2853 if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) 2854 release_node = true; 2855 } 2856 spin_unlock_irq(&ndlp->lock); 2857 2858 if (release_node) 2859 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2860 NLP_EVT_DEVICE_RM); 2861 } else 2862 /* Good status, call state machine */ 2863 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2864 NLP_EVT_CMPL_ADISC); 2865 2866 /* Check to see if there are more ADISCs to be sent */ 2867 if (disc && vport->num_disc_nodes) 2868 lpfc_more_adisc(vport); 2869 out: 2870 lpfc_els_free_iocb(phba, cmdiocb); 2871 lpfc_nlp_put(ndlp); 2872 return; 2873 } 2874 2875 /** 2876 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2877 * @vport: pointer to a virtual N_Port data structure. 2878 * @ndlp: pointer to a node-list data structure. 2879 * @retry: number of retries to the command IOCB. 2880 * 2881 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2882 * @vport. 
It prepares the payload of the ADISC ELS command, updates the
2883 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2884 * to issue the ADISC ELS command.
2885 *
2886 * Note that the ndlp reference count will be incremented by 1 for holding the
2887 * ndlp and the reference to ndlp will be stored into the ndlp field of
2888 * the IOCB for the completion callback function to the ADISC ELS command.
2889 *
2890 * Return code
2891 * 0 - successfully issued adisc
2892 * 1 - failed to issue adisc
2893 **/
2894 int
2895 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2896 uint8_t retry)
2897 {
2898 int rc = 0;
2899 struct lpfc_hba *phba = vport->phba;
2900 ADISC *ap;
2901 struct lpfc_iocbq *elsiocb;
2902 uint8_t *pcmd;
2903 uint16_t cmdsize;
2904
2905 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2906 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2907 ndlp->nlp_DID, ELS_CMD_ADISC);
2908 if (!elsiocb)
2909 return 1;
2910
2911 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2912
2913 /* For ADISC request, remainder of payload is service parameters */
2914 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2915 pcmd += sizeof(uint32_t);
2916
2917 /* Fill in ADISC payload */
2918 ap = (ADISC *) pcmd;
2919 ap->hardAL_PA = phba->fc_pref_ALPA;
2920 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2921 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2922 ap->DID = be32_to_cpu(vport->fc_myDID);
2923
2924 phba->fc_stat.elsXmitADISC++;
2925 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2926 set_bit(NLP_ADISC_SND, &ndlp->nlp_flag);
2927 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2928 if (!elsiocb->ndlp) {
2929 lpfc_els_free_iocb(phba, elsiocb);
2930 goto err;
2931 }
2932
2933 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2934 "Issue ADISC: did:x%x refcnt %d",
2935 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2936
2937 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2938 if (rc == IOCB_ERROR) {
2939 lpfc_els_free_iocb(phba, elsiocb);
2940 lpfc_nlp_put(ndlp);
2941 goto err;
2942 }
2943
2944 return 0;
2945
2946 err:
2947 clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag);
2948 return 1;
2949 }
2950
2951 /**
2952 * lpfc_cmpl_els_logo - Completion callback function for logo
2953 * @phba: pointer to lpfc hba data structure.
2954 * @cmdiocb: pointer to lpfc command iocb data structure.
2955 * @rspiocb: pointer to lpfc response iocb data structure.
2956 *
2957 * This routine is the completion function for issuing the ELS Logout (LOGO)
2958 * command. Unless a host link attention event is pending, the state machine
2959 * of the associated ndlp is invoked with the NLP_EVT_CMPL_LOGO event whether
2960 * or not an error status was reported; a failed LOGO is logged but not retried.
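 *
 * For FCP or NVMe target nodes, unless recovery is being skipped, the
 * routine then restarts discovery so a fresh PLOGI can recover the login;
 * otherwise a node that is no longer registered with a transport is
 * removed with an NLP_EVT_DEVICE_RM event.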
2961 **/ 2962 static void 2963 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2964 struct lpfc_iocbq *rspiocb) 2965 { 2966 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2967 struct lpfc_vport *vport = ndlp->vport; 2968 IOCB_t *irsp; 2969 uint32_t skip_recovery = 0; 2970 int wake_up_waiter = 0; 2971 u32 ulp_status; 2972 u32 ulp_word4; 2973 u32 tmo, iotag; 2974 2975 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2976 cmdiocb->rsp_iocb = rspiocb; 2977 2978 ulp_status = get_job_ulpstatus(phba, rspiocb); 2979 ulp_word4 = get_job_word4(phba, rspiocb); 2980 2981 if (phba->sli_rev == LPFC_SLI_REV4) { 2982 tmo = get_wqe_tmo(cmdiocb); 2983 iotag = get_wqe_reqtag(cmdiocb); 2984 } else { 2985 irsp = &rspiocb->iocb; 2986 tmo = irsp->ulpTimeout; 2987 iotag = irsp->ulpIoTag; 2988 } 2989 2990 clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); 2991 if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) 2992 wake_up_waiter = 1; 2993 2994 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2995 "LOGO cmpl: status:x%x/x%x did:x%x", 2996 ulp_status, ulp_word4, 2997 ndlp->nlp_DID); 2998 2999 /* LOGO completes to NPort <nlp_DID> */ 3000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3001 "0105 LOGO completes to NPort x%x " 3002 "IoTag x%x refcnt %d nflags x%lx xflags x%x " 3003 "Data: x%x x%x x%x x%x\n", 3004 ndlp->nlp_DID, iotag, 3005 kref_read(&ndlp->kref), ndlp->nlp_flag, 3006 ndlp->fc4_xpt_flags, ulp_status, ulp_word4, 3007 tmo, vport->num_disc_nodes); 3008 3009 if (lpfc_els_chk_latt(vport)) { 3010 skip_recovery = 1; 3011 goto out; 3012 } 3013 3014 /* The LOGO will not be retried on failure. A LOGO was 3015 * issued to the remote rport and a ACC or RJT or no Answer are 3016 * all acceptable. Note the failure and move forward with 3017 * discovery. The PLOGI will retry. 3018 */ 3019 if (ulp_status) { 3020 /* Warn LOGO status */ 3021 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 3022 "2756 LOGO, No Retry DID:%06X " 3023 "Status:x%x/x%x\n", 3024 ndlp->nlp_DID, ulp_status, 3025 ulp_word4); 3026 3027 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3028 skip_recovery = 1; 3029 } 3030 3031 /* Call state machine. This will unregister the rpi if needed. */ 3032 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3033 3034 out: 3035 /* At this point, the LOGO processing is complete. NOTE: For a 3036 * pt2pt topology, we are assuming the NPortID will only change 3037 * on link up processing. For a LOGO / PLOGI initiated by the 3038 * Initiator, we are assuming the NPortID is not going to change. 3039 */ 3040 3041 if (wake_up_waiter && ndlp->logo_waitq) 3042 wake_up(ndlp->logo_waitq); 3043 /* 3044 * If the node is a target, the handling attempts to recover the port. 3045 * For any other port type, the rpi is unregistered as an implicit 3046 * LOGO. 3047 */ 3048 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3049 skip_recovery == 0) { 3050 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3051 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 3052 3053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3054 "3187 LOGO completes to NPort x%x: Start " 3055 "Recovery Data: x%x x%x x%x x%x\n", 3056 ndlp->nlp_DID, ulp_status, 3057 ulp_word4, tmo, 3058 vport->num_disc_nodes); 3059 3060 lpfc_els_free_iocb(phba, cmdiocb); 3061 lpfc_nlp_put(ndlp); 3062 3063 lpfc_disc_start(vport); 3064 return; 3065 } 3066 3067 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3068 * driver sends a LOGO to the rport to cleanup. 
For fabric and
3069 * initiator ports, clean up the node as long as the node is not
3070 * registered with the transport.
3071 */
3072 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3073 clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
3074 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3075 NLP_EVT_DEVICE_RM);
3076 }
3077
3078 /* Driver is done with the I/O. */
3079 lpfc_els_free_iocb(phba, cmdiocb);
3080 lpfc_nlp_put(ndlp);
3081 }
3082
3083 /**
3084 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3085 * @vport: pointer to a virtual N_Port data structure.
3086 * @ndlp: pointer to a node-list data structure.
3087 * @retry: number of retries to the command IOCB.
3088 *
3089 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3090 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3091 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3092 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3093 *
3094 * Note that the ndlp reference count will be incremented by 1 for holding the
3095 * ndlp and the reference to ndlp will be stored into the ndlp field of
3096 * the IOCB for the completion callback function to the LOGO ELS command.
3097 *
3098 * Callers of this routine are expected to unregister the RPI first.
3099 *
3100 * Return code
3101 * 0 - successfully issued logo
3102 * 1 - failed to issue logo
3103 **/
3104 int
3105 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3106 uint8_t retry)
3107 {
3108 struct lpfc_hba *phba = vport->phba;
3109 struct lpfc_iocbq *elsiocb;
3110 uint8_t *pcmd;
3111 uint16_t cmdsize;
3112 int rc;
3113
3114 if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag))
3115 return 0;
3116
3117 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3118 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3119 ndlp->nlp_DID, ELS_CMD_LOGO);
3120 if (!elsiocb)
3121 return 1;
3122
3123 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3124 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3125 pcmd += sizeof(uint32_t);
3126
3127 /* Fill in LOGO payload */
3128 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3129 pcmd += sizeof(uint32_t);
3130 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3131
3132 phba->fc_stat.elsXmitLOGO++;
3133 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
3134 set_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
3135 clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
3136 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3137 if (!elsiocb->ndlp) {
3138 lpfc_els_free_iocb(phba, elsiocb);
3139 goto err;
3140 }
3141
3142 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3143 "Issue LOGO: did:x%x refcnt %d",
3144 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3145
3146 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3147 if (rc == IOCB_ERROR) {
3148 lpfc_els_free_iocb(phba, elsiocb);
3149 lpfc_nlp_put(ndlp);
3150 goto err;
3151 }
3152
3153 spin_lock_irq(&ndlp->lock);
3154 ndlp->nlp_prev_state = ndlp->nlp_state;
3155 spin_unlock_irq(&ndlp->lock);
3156 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3157 return 0;
3158
3159 err:
3160 clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
3161 return 1;
3162 }
3163
3164 /**
3165 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3166 * @phba: pointer to lpfc hba data structure.
3167 * @cmdiocb: pointer to lpfc command iocb data structure.
3168 * @rspiocb: pointer to lpfc response iocb data structure.
3169 *
3170 * This routine is a generic completion callback function for ELS commands.
3171 * Specifically, it is the callback function which does not need to perform
3172 * any command specific operations. It is currently used by the ELS command
3173 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3174 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3175 * Other than certain debug logging, this callback function simply invokes the
3176 * lpfc_els_chk_latt() routine to check whether link went down during the
3177 * discovery process.
3178 **/
3179 static void
3180 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3181 struct lpfc_iocbq *rspiocb)
3182 {
3183 struct lpfc_vport *vport = cmdiocb->vport;
3184 struct lpfc_nodelist *free_ndlp;
3185 IOCB_t *irsp;
3186 u32 ulp_status, ulp_word4, tmo, did, iotag;
3187
3188 ulp_status = get_job_ulpstatus(phba, rspiocb);
3189 ulp_word4 = get_job_word4(phba, rspiocb);
3190 did = get_job_els_rsp64_did(phba, cmdiocb);
3191
3192 if (phba->sli_rev == LPFC_SLI_REV4) {
3193 tmo = get_wqe_tmo(cmdiocb);
3194 iotag = get_wqe_reqtag(cmdiocb);
3195 } else {
3196 irsp = &rspiocb->iocb;
3197 tmo = irsp->ulpTimeout;
3198 iotag = irsp->ulpIoTag;
3199 }
3200
3201 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3202 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3203 ulp_status, ulp_word4, did);
3204
3205 /* ELS cmd tag <ulpIoTag> completes */
3206 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3207 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3208 iotag, ulp_status, ulp_word4, tmo);
3209
3210 /* Check to see if link went down during discovery */
3211 lpfc_els_chk_latt(vport);
3212
3213 free_ndlp = cmdiocb->ndlp;
3214
3215 lpfc_els_free_iocb(phba, cmdiocb);
3216 lpfc_nlp_put(free_ndlp);
3217 }
3218
3219 /**
3220 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3221 * @vport: pointer to lpfc_vport data structure.
3222 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3223 *
3224 * This routine registers the rpi assigned to the fabric controller
3225 * NPort_ID (0xfffffd) with the port and moves the node to the UNMAPPED
3226 * state, triggering a registration with the SCSI transport.
3227 *
3228 * This routine is singled out because the fabric controller node
3229 * does not receive a PLOGI. This routine is consumed by the
3230 * SCR and RDF ELS commands. Callers are expected to qualify
3231 * with SLI4 first.
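 *
 * Return: 0 on success, or a negative error code when the NameServer node
 * is unavailable, mailbox memory cannot be allocated, or the REG_LOGIN
 * mailbox command cannot be built or issued.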
3232 **/ 3233 static int 3234 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3235 { 3236 int rc; 3237 struct lpfc_hba *phba = vport->phba; 3238 struct lpfc_nodelist *ns_ndlp; 3239 LPFC_MBOXQ_t *mbox; 3240 3241 if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) 3242 return 0; 3243 3244 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3245 if (!ns_ndlp) 3246 return -ENODEV; 3247 3248 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3249 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3250 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3251 ns_ndlp->nlp_state); 3252 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3253 return -ENODEV; 3254 3255 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3256 if (!mbox) { 3257 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3258 "0936 %s: no memory for reg_login " 3259 "Data: x%x x%x x%lx x%x\n", __func__, 3260 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3261 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3262 return -ENOMEM; 3263 } 3264 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3265 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3266 if (rc) { 3267 rc = -EACCES; 3268 goto out; 3269 } 3270 3271 set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); 3272 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3273 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3274 if (!mbox->ctx_ndlp) { 3275 rc = -ENOMEM; 3276 goto out; 3277 } 3278 3279 mbox->vport = vport; 3280 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3281 if (rc == MBX_NOT_FINISHED) { 3282 rc = -ENODEV; 3283 lpfc_nlp_put(fc_ndlp); 3284 goto out; 3285 } 3286 /* Success path. Exit. */ 3287 lpfc_nlp_set_state(vport, fc_ndlp, 3288 NLP_STE_REG_LOGIN_ISSUE); 3289 return 0; 3290 3291 out: 3292 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3293 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3294 "0938 %s: failed to format reg_login " 3295 "Data: x%x x%x x%lx x%x\n", __func__, 3296 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3297 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3298 return rc; 3299 } 3300 3301 /** 3302 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3303 * @phba: pointer to lpfc hba data structure. 3304 * @cmdiocb: pointer to lpfc command iocb data structure. 3305 * @rspiocb: pointer to lpfc response iocb data structure. 3306 * 3307 * This routine is a generic completion callback function for Discovery ELS cmd. 3308 * Currently used by the ELS command issuing routines for the ELS State Change 3309 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3310 * These commands will be retried once only for ELS timeout errors. 
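 *
 * EDC completions are handed off to lpfc_cmpl_els_edc() before the status
 * is examined, and successful RDF responses only have their notification
 * descriptors logged for support purposes.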
3311 **/ 3312 static void 3313 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3314 struct lpfc_iocbq *rspiocb) 3315 { 3316 struct lpfc_vport *vport = cmdiocb->vport; 3317 IOCB_t *irsp; 3318 struct lpfc_els_rdf_rsp *prdf; 3319 struct lpfc_dmabuf *pcmd, *prsp; 3320 u32 *pdata; 3321 u32 cmd; 3322 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3323 u32 ulp_status, ulp_word4, tmo, did, iotag; 3324 3325 ulp_status = get_job_ulpstatus(phba, rspiocb); 3326 ulp_word4 = get_job_word4(phba, rspiocb); 3327 did = get_job_els_rsp64_did(phba, cmdiocb); 3328 3329 if (phba->sli_rev == LPFC_SLI_REV4) { 3330 tmo = get_wqe_tmo(cmdiocb); 3331 iotag = get_wqe_reqtag(cmdiocb); 3332 } else { 3333 irsp = &rspiocb->iocb; 3334 tmo = irsp->ulpTimeout; 3335 iotag = irsp->ulpIoTag; 3336 } 3337 3338 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3339 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3340 ulp_status, ulp_word4, did); 3341 3342 /* ELS cmd tag <ulpIoTag> completes */ 3343 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3344 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3345 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3346 3347 pcmd = cmdiocb->cmd_dmabuf; 3348 if (!pcmd) 3349 goto out; 3350 3351 pdata = (u32 *)pcmd->virt; 3352 if (!pdata) 3353 goto out; 3354 cmd = *pdata; 3355 3356 /* Only 1 retry for ELS Timeout only */ 3357 if (ulp_status == IOSTAT_LOCAL_REJECT && 3358 ((ulp_word4 & IOERR_PARAM_MASK) == 3359 IOERR_SEQUENCE_TIMEOUT)) { 3360 cmdiocb->retry++; 3361 if (cmdiocb->retry <= 1) { 3362 switch (cmd) { 3363 case ELS_CMD_SCR: 3364 lpfc_issue_els_scr(vport, cmdiocb->retry); 3365 break; 3366 case ELS_CMD_EDC: 3367 lpfc_issue_els_edc(vport, cmdiocb->retry); 3368 break; 3369 case ELS_CMD_RDF: 3370 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3371 break; 3372 } 3373 goto out; 3374 } 3375 phba->fc_stat.elsRetryExceeded++; 3376 } 3377 if (cmd == ELS_CMD_EDC) { 3378 /* must be called before checking uplStatus and returning */ 3379 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3380 return; 3381 } 3382 if (ulp_status) { 3383 /* ELS discovery cmd completes with error */ 3384 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3385 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3386 ulp_status, ulp_word4); 3387 goto out; 3388 } 3389 3390 /* The RDF response doesn't have any impact on the running driver 3391 * but the notification descriptors are dumped here for support. 3392 */ 3393 if (cmd == ELS_CMD_RDF) { 3394 int i; 3395 3396 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3397 if (!prsp) 3398 goto out; 3399 3400 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3401 if (!prdf) 3402 goto out; 3403 if (!lpfc_is_els_acc_rsp(prsp)) 3404 goto out; 3405 3406 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3407 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3408 lpfc_printf_vlog(vport, KERN_INFO, 3409 LOG_ELS | LOG_CGN_MGMT, 3410 "4677 Fabric RDF Notification Grant " 3411 "Data: 0x%08x Reg: %x %x\n", 3412 be32_to_cpu( 3413 prdf->reg_d1.desc_tags[i]), 3414 phba->cgn_reg_signal, 3415 phba->cgn_reg_fpin); 3416 } 3417 3418 out: 3419 /* Check to see if link went down during discovery */ 3420 lpfc_els_chk_latt(vport); 3421 lpfc_els_free_iocb(phba, cmdiocb); 3422 lpfc_nlp_put(ndlp); 3423 return; 3424 } 3425 3426 /** 3427 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3428 * @vport: pointer to a host virtual N_Port data structure. 3429 * @retry: retry counter for the command IOCB. 
3430 * 3431 * This routine issues a State Change Request (SCR) to a fabric node 3432 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3433 * first search the @vport node list to find the matching ndlp. If no such 3434 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3435 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3436 * routine is invoked to send the SCR IOCB. 3437 * 3438 * Note that the ndlp reference count will be incremented by 1 for holding the 3439 * ndlp and the reference to ndlp will be stored into the ndlp field of 3440 * the IOCB for the completion callback function to the SCR ELS command. 3441 * 3442 * Return code 3443 * 0 - Successfully issued scr command 3444 * 1 - Failed to issue scr command 3445 **/ 3446 int 3447 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3448 { 3449 int rc = 0; 3450 struct lpfc_hba *phba = vport->phba; 3451 struct lpfc_iocbq *elsiocb; 3452 uint8_t *pcmd; 3453 uint16_t cmdsize; 3454 struct lpfc_nodelist *ndlp; 3455 3456 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3457 3458 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3459 if (!ndlp) { 3460 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3461 if (!ndlp) 3462 return 1; 3463 lpfc_enqueue_node(vport, ndlp); 3464 } 3465 3466 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3467 ndlp->nlp_DID, ELS_CMD_SCR); 3468 if (!elsiocb) 3469 return 1; 3470 3471 if (phba->sli_rev == LPFC_SLI_REV4) { 3472 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3473 if (rc) { 3474 lpfc_els_free_iocb(phba, elsiocb); 3475 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3476 "0937 %s: Failed to reg fc node, rc %d\n", 3477 __func__, rc); 3478 return 1; 3479 } 3480 } 3481 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3482 3483 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3484 pcmd += sizeof(uint32_t); 3485 3486 /* For SCR, remainder of payload is SCR parameter page */ 3487 memset(pcmd, 0, sizeof(SCR)); 3488 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3489 3490 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3491 "Issue SCR: did:x%x", 3492 ndlp->nlp_DID, 0, 0); 3493 3494 phba->fc_stat.elsXmitSCR++; 3495 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3496 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3497 if (!elsiocb->ndlp) { 3498 lpfc_els_free_iocb(phba, elsiocb); 3499 return 1; 3500 } 3501 3502 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3503 "Issue SCR: did:x%x refcnt %d", 3504 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3505 3506 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3507 if (rc == IOCB_ERROR) { 3508 lpfc_els_free_iocb(phba, elsiocb); 3509 lpfc_nlp_put(ndlp); 3510 return 1; 3511 } 3512 3513 return 0; 3514 } 3515 3516 /** 3517 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3518 * or the other nport (pt2pt). 3519 * @vport: pointer to a host virtual N_Port data structure. 3520 * @retry: number of retries to the command IOCB. 3521 * 3522 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3523 * when connected to a fabric, or to the remote port when connected 3524 * in point-to-point mode. When sent to the Fabric Controller, it will 3525 * replay the RSCN to registered recipients. 3526 * 3527 * Note that the ndlp reference count will be incremented by 1 for holding the 3528 * ndlp and the reference to ndlp will be stored into the ndlp field of 3529 * the IOCB for the completion callback function to the RSCN ELS command. 
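 *
 * The request built here is a single-page RSCN: a struct fc_els_rscn
 * header immediately followed by one struct fc_els_rscn_page whose
 * rscn_fid[] carries this port's own fc_myDID. The page flags are left
 * at zero, which appears to be required for the fabric to broadcast the
 * RSCN to registered recipients.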
3530 * 3531 * Return code 3532 * 0 - Successfully issued RSCN command 3533 * 1 - Failed to issue RSCN command 3534 **/ 3535 int 3536 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3537 { 3538 int rc = 0; 3539 struct lpfc_hba *phba = vport->phba; 3540 struct lpfc_iocbq *elsiocb; 3541 struct lpfc_nodelist *ndlp; 3542 struct { 3543 struct fc_els_rscn rscn; 3544 struct fc_els_rscn_page portid; 3545 } *event; 3546 uint32_t nportid; 3547 uint16_t cmdsize = sizeof(*event); 3548 3549 /* Not supported for private loop */ 3550 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3551 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 3552 return 1; 3553 3554 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 3555 /* find any mapped nport - that would be the other nport */ 3556 ndlp = lpfc_findnode_mapped(vport); 3557 if (!ndlp) 3558 return 1; 3559 } else { 3560 nportid = FC_FID_FCTRL; 3561 /* find the fabric controller node */ 3562 ndlp = lpfc_findnode_did(vport, nportid); 3563 if (!ndlp) { 3564 /* if one didn't exist, make one */ 3565 ndlp = lpfc_nlp_init(vport, nportid); 3566 if (!ndlp) 3567 return 1; 3568 lpfc_enqueue_node(vport, ndlp); 3569 } 3570 } 3571 3572 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3573 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3574 3575 if (!elsiocb) 3576 return 1; 3577 3578 event = elsiocb->cmd_dmabuf->virt; 3579 3580 event->rscn.rscn_cmd = ELS_RSCN; 3581 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3582 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3583 3584 nportid = vport->fc_myDID; 3585 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3586 event->portid.rscn_page_flags = 0; 3587 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3588 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3589 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3590 3591 phba->fc_stat.elsXmitRSCN++; 3592 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3593 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3594 if (!elsiocb->ndlp) { 3595 lpfc_els_free_iocb(phba, elsiocb); 3596 return 1; 3597 } 3598 3599 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3600 "Issue RSCN: did:x%x", 3601 ndlp->nlp_DID, 0, 0); 3602 3603 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3604 if (rc == IOCB_ERROR) { 3605 lpfc_els_free_iocb(phba, elsiocb); 3606 lpfc_nlp_put(ndlp); 3607 return 1; 3608 } 3609 3610 return 0; 3611 } 3612 3613 /** 3614 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3615 * @vport: pointer to a host virtual N_Port data structure. 3616 * @nportid: N_Port identifier to the remote node. 3617 * @retry: number of retries to the command IOCB. 3618 * 3619 * This routine issues a Fibre Channel Address Resolution Response 3620 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3621 * is passed into the function. It first search the @vport node list to find 3622 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3623 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3624 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3625 * 3626 * Note that the ndlp reference count will be incremented by 1 for holding the 3627 * ndlp and the reference to ndlp will be stored into the ndlp field of 3628 * the IOCB for the completion callback function to the FARPR ELS command. 
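 *
 * The FARPR payload copies the local port and node names from the
 * @vport into the RportName and RnodeName fields and, when a matching
 * ndlp for @nportid already exists, that node's names into the
 * OportName and OnodeName fields, with match flags set for both port
 * name and node name.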
3629 * 3630 * Return code 3631 * 0 - Successfully issued farpr command 3632 * 1 - Failed to issue farpr command 3633 **/ 3634 static int 3635 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3636 { 3637 int rc = 0; 3638 struct lpfc_hba *phba = vport->phba; 3639 struct lpfc_iocbq *elsiocb; 3640 FARP *fp; 3641 uint8_t *pcmd; 3642 uint32_t *lp; 3643 uint16_t cmdsize; 3644 struct lpfc_nodelist *ondlp; 3645 struct lpfc_nodelist *ndlp; 3646 3647 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3648 3649 ndlp = lpfc_findnode_did(vport, nportid); 3650 if (!ndlp) { 3651 ndlp = lpfc_nlp_init(vport, nportid); 3652 if (!ndlp) 3653 return 1; 3654 lpfc_enqueue_node(vport, ndlp); 3655 } 3656 3657 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3658 ndlp->nlp_DID, ELS_CMD_FARPR); 3659 if (!elsiocb) 3660 return 1; 3661 3662 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3663 3664 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3665 pcmd += sizeof(uint32_t); 3666 3667 /* Fill in FARPR payload */ 3668 fp = (FARP *) (pcmd); 3669 memset(fp, 0, sizeof(FARP)); 3670 lp = (uint32_t *) pcmd; 3671 *lp++ = be32_to_cpu(nportid); 3672 *lp++ = be32_to_cpu(vport->fc_myDID); 3673 fp->Rflags = 0; 3674 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3675 3676 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3677 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3678 ondlp = lpfc_findnode_did(vport, nportid); 3679 if (ondlp) { 3680 memcpy(&fp->OportName, &ondlp->nlp_portname, 3681 sizeof(struct lpfc_name)); 3682 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3683 sizeof(struct lpfc_name)); 3684 } 3685 3686 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3687 "Issue FARPR: did:x%x", 3688 ndlp->nlp_DID, 0, 0); 3689 3690 phba->fc_stat.elsXmitFARPR++; 3691 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3692 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3693 if (!elsiocb->ndlp) { 3694 lpfc_els_free_iocb(phba, elsiocb); 3695 return 1; 3696 } 3697 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3699 if (rc == IOCB_ERROR) { 3700 /* The additional lpfc_nlp_put will cause the following 3701 * lpfc_els_free_iocb routine to trigger the release of 3702 * the node. 3703 */ 3704 lpfc_els_free_iocb(phba, elsiocb); 3705 lpfc_nlp_put(ndlp); 3706 return 1; 3707 } 3708 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3709 * trigger the release of the node. 3710 */ 3711 /* Don't release reference count as RDF is likely outstanding */ 3712 return 0; 3713 } 3714 3715 /** 3716 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3717 * @vport: pointer to a host virtual N_Port data structure. 3718 * @retry: retry counter for the command IOCB. 3719 * 3720 * This routine issues an ELS RDF to the Fabric Controller to register 3721 * for diagnostic functions. 3722 * 3723 * Note that the ndlp reference count will be incremented by 1 for holding the 3724 * ndlp and the reference to ndlp will be stored into the ndlp field of 3725 * the IOCB for the completion callback function to the RDF ELS command. 
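 *
 * The single FPIN registration descriptor sent here requests all four
 * notification types the driver understands (ELS_RDF_REG_TAG_CNT tags):
 * link integrity, delivery, peer congestion and congestion.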
3726 * 3727 * Return code 3728 * 0 - Successfully issued rdf command 3729 * 1 - Failed to issue rdf command 3730 **/ 3731 int 3732 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3733 { 3734 struct lpfc_hba *phba = vport->phba; 3735 struct lpfc_iocbq *elsiocb; 3736 struct lpfc_els_rdf_req *prdf; 3737 struct lpfc_nodelist *ndlp; 3738 uint16_t cmdsize; 3739 int rc; 3740 3741 cmdsize = sizeof(*prdf); 3742 3743 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3744 if (!ndlp) { 3745 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3746 if (!ndlp) 3747 return -ENODEV; 3748 lpfc_enqueue_node(vport, ndlp); 3749 } 3750 3751 /* RDF ELS is not required on an NPIV VN_Port. */ 3752 if (vport->port_type == LPFC_NPIV_PORT) 3753 return -EACCES; 3754 3755 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3756 ndlp->nlp_DID, ELS_CMD_RDF); 3757 if (!elsiocb) 3758 return -ENOMEM; 3759 3760 /* Configure the payload for the supported FPIN events. */ 3761 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3762 memset(prdf, 0, cmdsize); 3763 prdf->rdf.fpin_cmd = ELS_RDF; 3764 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3765 sizeof(struct fc_els_rdf)); 3766 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3767 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3768 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3769 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3770 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3771 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3772 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3773 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3774 3775 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3776 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3777 ndlp->nlp_DID, phba->cgn_reg_signal, 3778 phba->cgn_reg_fpin); 3779 3780 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3781 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3782 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3783 if (!elsiocb->ndlp) { 3784 lpfc_els_free_iocb(phba, elsiocb); 3785 return -EIO; 3786 } 3787 3788 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3789 "Issue RDF: did:x%x refcnt %d", 3790 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3791 3792 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3793 if (rc == IOCB_ERROR) { 3794 lpfc_els_free_iocb(phba, elsiocb); 3795 lpfc_nlp_put(ndlp); 3796 return -EIO; 3797 } 3798 return 0; 3799 } 3800 3801 /** 3802 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3803 * @vport: pointer to a host virtual N_Port data structure. 3804 * @cmdiocb: pointer to lpfc command iocb data structure. 3805 * @ndlp: pointer to a node-list data structure. 3806 * 3807 * A received RDF implies a possible change to fabric supported diagnostic 3808 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3809 * RDF request to reregister for supported diagnostic functions. 
3810 *
3811 * Return code
3812 * 0 - Success
3813 * -EIO - Failed to process received RDF
3814 **/
3815 static int
3816 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3817 struct lpfc_nodelist *ndlp)
3818 {
3819 /* Send LS_ACC */
3820 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3821 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3822 "1623 Failed to RDF_ACC from x%x for x%x\n",
3823 ndlp->nlp_DID, vport->fc_myDID);
3824 return -EIO;
3825 }
3826
3827 /* Issue new RDF for reregistering */
3828 if (lpfc_issue_els_rdf(vport, 0)) {
3829 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3830 "2623 Failed to re register RDF for x%x\n",
3831 vport->fc_myDID);
3832 return -EIO;
3833 }
3834
3835 return 0;
3836 }
3837
3838 /**
3839 * lpfc_least_capable_settings - helper function for EDC rsp processing
3840 * @phba: pointer to lpfc hba data structure.
3841 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3842 *
3843 * This helper routine determines the least capable settings for
3844 * congestion signals and signal frequency, including scale, from the
3845 * congestion detection descriptor in the EDC rsp. The routine
3846 * sets @phba values in preparation for a set_features mailbox.
3847 **/
3848 static void
3849 lpfc_least_capable_settings(struct lpfc_hba *phba,
3850 struct fc_diag_cg_sig_desc *pcgd)
3851 {
3852 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3853 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3854
3855 /* Get rsp signal and frequency capabilities. */
3856 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3857 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3858 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3859
3860 /* If the Fport does not support signals, set FPIN only */
3861 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3862 goto out_no_support;
3863
3864 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3865 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3866 * to milliseconds.
3867 */
3868 switch (rsp_sig_freq_scale) {
3869 case EDC_CG_SIGFREQ_SEC:
3870 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3871 break;
3872 case EDC_CG_SIGFREQ_MSEC:
3873 rsp_sig_freq_cyc = 1;
3874 break;
3875 default:
3876 goto out_no_support;
3877 }
3878
3879 /* Convenient shorthand. */
3880 drv_sig_cap = phba->cgn_reg_signal;
3881
3882 /* Choose the least capable frequency. */
3883 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3884 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3885
3886 /* There should be some common signal support. Settle on least capable
3887 * signal and adjust FPIN values. Initialize defaults to ease the
3888 * decision.
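 *
 * The resulting registration, by F_Port (rsp) and driver (drv)
 * capability, is:
 *
 *   rsp WARN_ONLY, drv WARN_ONLY or WARN_ALARM: warning signals,
 *       alarms stay FPIN based.
 *   rsp WARN_ALARM, drv WARN_ALARM: warning and alarm signals,
 *       no FPINs.
 *   rsp WARN_ALARM, drv WARN_ONLY: warning signals, alarms stay
 *       FPIN based.
 *   any other combination: FPIN only, no signals.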
3889 */ 3890 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3891 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3892 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3893 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3894 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3895 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3896 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3897 } 3898 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3899 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3900 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3901 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3902 } 3903 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3904 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3905 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3906 } 3907 } 3908 3909 /* We are NOT recording signal frequency in congestion info buffer */ 3910 return; 3911 3912 out_no_support: 3913 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3914 phba->cgn_sig_freq = 0; 3915 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3916 } 3917 3918 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3919 FC_LS_TLV_DTAG_INIT); 3920 3921 /** 3922 * lpfc_cmpl_els_edc - Completion callback function for EDC 3923 * @phba: pointer to lpfc hba data structure. 3924 * @cmdiocb: pointer to lpfc command iocb data structure. 3925 * @rspiocb: pointer to lpfc response iocb data structure. 3926 * 3927 * This routine is the completion callback function for issuing the Exchange 3928 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3929 * notify the FPort of its Congestion and Link Fault capabilities. This 3930 * routine parses the FPort's response and decides on the least common 3931 * values applicable to both FPort and NPort for Warnings and Alarms that 3932 * are communicated via hardware signals. 3933 **/ 3934 static void 3935 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3936 struct lpfc_iocbq *rspiocb) 3937 { 3938 IOCB_t *irsp_iocb; 3939 struct fc_els_edc_resp *edc_rsp; 3940 struct fc_tlv_desc *tlv; 3941 struct fc_diag_cg_sig_desc *pcgd; 3942 struct fc_diag_lnkflt_desc *plnkflt; 3943 struct lpfc_dmabuf *pcmd, *prsp; 3944 const char *dtag_nm; 3945 u32 *pdata, dtag; 3946 int desc_cnt = 0, bytes_remain; 3947 bool rcv_cap_desc = false; 3948 struct lpfc_nodelist *ndlp; 3949 u32 ulp_status, ulp_word4, tmo, did, iotag; 3950 3951 ndlp = cmdiocb->ndlp; 3952 3953 ulp_status = get_job_ulpstatus(phba, rspiocb); 3954 ulp_word4 = get_job_word4(phba, rspiocb); 3955 did = get_job_els_rsp64_did(phba, rspiocb); 3956 3957 if (phba->sli_rev == LPFC_SLI_REV4) { 3958 tmo = get_wqe_tmo(rspiocb); 3959 iotag = get_wqe_reqtag(rspiocb); 3960 } else { 3961 irsp_iocb = &rspiocb->iocb; 3962 tmo = irsp_iocb->ulpTimeout; 3963 iotag = irsp_iocb->ulpIoTag; 3964 } 3965 3966 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3967 "EDC cmpl: status:x%x/x%x did:x%x", 3968 ulp_status, ulp_word4, did); 3969 3970 /* ELS cmd tag <ulpIoTag> completes */ 3971 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3972 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3973 iotag, ulp_status, ulp_word4, tmo); 3974 3975 pcmd = cmdiocb->cmd_dmabuf; 3976 if (!pcmd) 3977 goto out; 3978 3979 pdata = (u32 *)pcmd->virt; 3980 if (!pdata) 3981 goto out; 3982 3983 /* Need to clear signal values, send features MB and RDF with FPIN. 
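 * Any failure from this point on leaves rcv_cap_desc false, so the
 * out: path below falls back to FPIN-only registration with hardware
 * signals disabled before calling lpfc_config_cgn_signal().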
*/ 3984 if (ulp_status) 3985 goto out; 3986 3987 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3988 if (!prsp) 3989 goto out; 3990 3991 edc_rsp = prsp->virt; 3992 if (!edc_rsp) 3993 goto out; 3994 3995 /* ELS cmd tag <ulpIoTag> completes */ 3996 lpfc_printf_log(phba, KERN_INFO, 3997 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 3998 "4676 Fabric EDC Rsp: " 3999 "0x%02x, 0x%08x\n", 4000 edc_rsp->acc_hdr.la_cmd, 4001 be32_to_cpu(edc_rsp->desc_list_len)); 4002 4003 if (!lpfc_is_els_acc_rsp(prsp)) 4004 goto out; 4005 4006 /* 4007 * Payload length in bytes is the response descriptor list 4008 * length minus the 12 bytes of Link Service Request 4009 * Information descriptor in the reply. 4010 */ 4011 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4012 sizeof(struct fc_els_lsri_desc); 4013 if (bytes_remain <= 0) 4014 goto out; 4015 4016 tlv = edc_rsp->desc; 4017 4018 /* 4019 * cycle through EDC diagnostic descriptors to find the 4020 * congestion signaling capability descriptor 4021 */ 4022 while (bytes_remain) { 4023 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4024 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4025 "6461 Truncated TLV hdr on " 4026 "Diagnostic descriptor[%d]\n", 4027 desc_cnt); 4028 goto out; 4029 } 4030 4031 dtag = be32_to_cpu(tlv->desc_tag); 4032 switch (dtag) { 4033 case ELS_DTAG_LNK_FAULT_CAP: 4034 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4035 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4036 sizeof(struct fc_diag_lnkflt_desc)) { 4037 lpfc_printf_log(phba, KERN_WARNING, 4038 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4039 "6462 Truncated Link Fault Diagnostic " 4040 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4041 desc_cnt, bytes_remain, 4042 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4043 sizeof(struct fc_diag_lnkflt_desc)); 4044 goto out; 4045 } 4046 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4047 lpfc_printf_log(phba, KERN_INFO, 4048 LOG_ELS | LOG_LDS_EVENT, 4049 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4050 "0x%08x 0x%08x 0x%08x\n", 4051 be32_to_cpu(plnkflt->desc_tag), 4052 be32_to_cpu(plnkflt->desc_len), 4053 be32_to_cpu( 4054 plnkflt->degrade_activate_threshold), 4055 be32_to_cpu( 4056 plnkflt->degrade_deactivate_threshold), 4057 be32_to_cpu(plnkflt->fec_degrade_interval)); 4058 break; 4059 case ELS_DTAG_CG_SIGNAL_CAP: 4060 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4061 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4062 sizeof(struct fc_diag_cg_sig_desc)) { 4063 lpfc_printf_log( 4064 phba, KERN_WARNING, LOG_CGN_MGMT, 4065 "6463 Truncated Cgn Signal Diagnostic " 4066 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4067 desc_cnt, bytes_remain, 4068 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4069 sizeof(struct fc_diag_cg_sig_desc)); 4070 goto out; 4071 } 4072 4073 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4074 lpfc_printf_log( 4075 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4076 "4616 CGN Desc Data: 0x%08x 0x%08x " 4077 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4078 be32_to_cpu(pcgd->desc_tag), 4079 be32_to_cpu(pcgd->desc_len), 4080 be32_to_cpu(pcgd->xmt_signal_capability), 4081 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4082 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4083 be32_to_cpu(pcgd->rcv_signal_capability), 4084 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4085 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4086 4087 /* Compare driver and Fport capabilities and choose 4088 * least common. 
4089 */ 4090 lpfc_least_capable_settings(phba, pcgd); 4091 rcv_cap_desc = true; 4092 break; 4093 default: 4094 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4095 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4096 "4919 unknown Diagnostic " 4097 "Descriptor[%d]: tag x%x (%s)\n", 4098 desc_cnt, dtag, dtag_nm); 4099 } 4100 4101 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4102 tlv = fc_tlv_next_desc(tlv); 4103 desc_cnt++; 4104 } 4105 4106 out: 4107 if (!rcv_cap_desc) { 4108 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4109 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4110 phba->cgn_sig_freq = 0; 4111 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4112 "4202 EDC rsp error - sending RDF " 4113 "for FPIN only.\n"); 4114 } 4115 4116 lpfc_config_cgn_signal(phba); 4117 4118 /* Check to see if link went down during discovery */ 4119 lpfc_els_chk_latt(phba->pport); 4120 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4121 "EDC Cmpl: did:x%x refcnt %d", 4122 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4123 lpfc_els_free_iocb(phba, cmdiocb); 4124 lpfc_nlp_put(ndlp); 4125 } 4126 4127 static void 4128 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4129 { 4130 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4131 4132 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4133 lft->desc_len = cpu_to_be32( 4134 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4135 4136 lft->degrade_activate_threshold = 4137 cpu_to_be32(phba->degrade_activate_threshold); 4138 lft->degrade_deactivate_threshold = 4139 cpu_to_be32(phba->degrade_deactivate_threshold); 4140 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4141 } 4142 4143 static void 4144 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4145 { 4146 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4147 4148 /* We are assuming cgd was zero'ed before calling this routine */ 4149 4150 /* Configure the congestion detection capability */ 4151 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4152 4153 /* Descriptor len doesn't include the tag or len fields. */ 4154 cgd->desc_len = cpu_to_be32( 4155 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4156 4157 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4158 * xmt_signal_frequency.count already set to 0. 4159 * xmt_signal_frequency.units already set to 0. 4160 */ 4161 4162 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4163 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4164 * rcv_signal_frequency.count already set to 0. 4165 * rcv_signal_frequency.units already set to 0. 4166 */ 4167 phba->cgn_sig_freq = 0; 4168 return; 4169 } 4170 switch (phba->cgn_reg_signal) { 4171 case EDC_CG_SIG_WARN_ONLY: 4172 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4173 break; 4174 case EDC_CG_SIG_WARN_ALARM: 4175 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4176 break; 4177 default: 4178 /* rcv_signal_capability left 0 thus no support */ 4179 break; 4180 } 4181 4182 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4183 * the completion we settle on the higher frequency. 
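 * The advertised count is in milliseconds (EDC_CG_SIGFREQ_MSEC);
 * lpfc_least_capable_settings() converts the F_Port's advertised cycle
 * to milliseconds as well before the two are compared.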
4184 */ 4185 cgd->rcv_signal_frequency.count = 4186 cpu_to_be16(lpfc_fabric_cgn_frequency); 4187 cgd->rcv_signal_frequency.units = 4188 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4189 } 4190 4191 static bool 4192 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4193 { 4194 if (!(phba->lmt & LMT_64Gb)) 4195 return false; 4196 if (phba->sli_rev != LPFC_SLI_REV4) 4197 return false; 4198 4199 if (phba->sli4_hba.conf_trunk) { 4200 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4201 return true; 4202 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4203 return true; 4204 } 4205 return false; 4206 } 4207 4208 /** 4209 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4210 * @vport: pointer to a host virtual N_Port data structure. 4211 * @retry: retry counter for the command iocb. 4212 * 4213 * This routine issues an ELS EDC to the F-Port Controller to communicate 4214 * this N_Port's support of hardware signals in its Congestion 4215 * Capabilities Descriptor. 4216 * 4217 * Note: This routine does not check if one or more signals are 4218 * set in the cgn_reg_signal parameter. The caller makes the 4219 * decision to enforce cgn_reg_signal as nonzero or zero depending 4220 * on the conditions. During Fabric requests, the driver 4221 * requires cgn_reg_signals to be nonzero. But a dynamic request 4222 * to set the congestion mode to OFF from Monitor or Manage 4223 * would correctly issue an EDC with no signals enabled to 4224 * turn off switch functionality and then update the FW. 4225 * 4226 * Return code 4227 * 0 - Successfully issued edc command 4228 * 1 - Failed to issue edc command 4229 **/ 4230 int 4231 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4232 { 4233 struct lpfc_hba *phba = vport->phba; 4234 struct lpfc_iocbq *elsiocb; 4235 struct fc_els_edc *edc_req; 4236 struct fc_tlv_desc *tlv; 4237 u16 cmdsize; 4238 struct lpfc_nodelist *ndlp; 4239 u8 *pcmd = NULL; 4240 u32 cgn_desc_size, lft_desc_size; 4241 int rc; 4242 4243 if (vport->port_type == LPFC_NPIV_PORT) 4244 return -EACCES; 4245 4246 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4247 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4248 return -ENODEV; 4249 4250 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4251 sizeof(struct fc_diag_cg_sig_desc) : 0; 4252 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4253 sizeof(struct fc_diag_lnkflt_desc) : 0; 4254 cmdsize = cgn_desc_size + lft_desc_size; 4255 4256 /* Skip EDC if no applicable descriptors */ 4257 if (!cmdsize) 4258 goto try_rdf; 4259 4260 cmdsize += sizeof(struct fc_els_edc); 4261 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4262 ndlp->nlp_DID, ELS_CMD_EDC); 4263 if (!elsiocb) 4264 goto try_rdf; 4265 4266 /* Configure the payload for the supported Diagnostics capabilities. 
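 * At most two descriptors follow the fc_els_edc header: a congestion
 * signaling capability descriptor when cgn_init_reg_signal is set, and
 * a link fault capability descriptor when the link is 64G LDS capable.
 * cmdsize and desc_len were sized above to match.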
*/
4267 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4268 memset(pcmd, 0, cmdsize);
4269 edc_req = (struct fc_els_edc *)pcmd;
4270 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4271 edc_req->edc_cmd = ELS_EDC;
4272 tlv = edc_req->desc;
4273
4274 if (cgn_desc_size) {
4275 lpfc_format_edc_cgn_desc(phba, tlv);
4276 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4277 tlv = fc_tlv_next_desc(tlv);
4278 }
4279
4280 if (lft_desc_size)
4281 lpfc_format_edc_lft_desc(phba, tlv);
4282
4283 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4284 "4623 Xmit EDC to remote "
4285 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4286 ndlp->nlp_DID, phba->cgn_reg_signal,
4287 phba->cgn_reg_fpin);
4288
4289 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4290 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4291 if (!elsiocb->ndlp) {
4292 lpfc_els_free_iocb(phba, elsiocb);
4293 return -EIO;
4294 }
4295
4296 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4297 "Issue EDC: did:x%x refcnt %d",
4298 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4299 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4300 if (rc == IOCB_ERROR) {
4301 /* The additional lpfc_nlp_put will cause the following
4302 * lpfc_els_free_iocb routine to trigger the release of
4303 * the node.
4304 */
4305 lpfc_els_free_iocb(phba, elsiocb);
4306 lpfc_nlp_put(ndlp);
4307 goto try_rdf;
4308 }
4309 return 0;
4310 try_rdf:
4311 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4312 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4313 rc = lpfc_issue_els_rdf(vport, 0);
4314 return rc;
4315 }
4316
4317 /**
4318 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4319 * @vport: pointer to a host virtual N_Port data structure.
4320 * @nlp: pointer to a node-list data structure.
4321 *
4322 * This routine cancels the timer with a delayed IOCB-command retry for
4323 * a @vport's @nlp. It stops the timer for the delayed function retry and
4324 * removes the ELS retry event if one is present. In addition, if the
4325 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4326 * commands are sent for the @vport's nodes that require issuing discovery
4327 * ADISC.
4328 **/
4329 void
4330 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
4331 {
4332 struct lpfc_work_evt *evtp;
4333
4334 if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag))
4335 return;
4336 del_timer_sync(&nlp->nlp_delayfunc);
4337 nlp->nlp_last_elscmd = 0;
4338 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
4339 list_del_init(&nlp->els_retry_evt.evt_listp);
4340 /* Decrement nlp reference count held for the delayed retry */
4341 evtp = &nlp->els_retry_evt;
4342 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
4343 }
4344 if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) {
4345 if (vport->num_disc_nodes) {
4346 if (vport->port_state < LPFC_VPORT_READY) {
4347 /* Check if there are more ADISCs to be sent */
4348 lpfc_more_adisc(vport);
4349 } else {
4350 /* Check if there are more PLOGIs to be sent */
4351 lpfc_more_plogi(vport);
4352 if (vport->num_disc_nodes == 0) {
4353 clear_bit(FC_NDISC_ACTIVE,
4354 &vport->fc_flag);
4355 lpfc_can_disctmo(vport);
4356 lpfc_end_rscn(vport);
4357 }
4358 }
4359 }
4360 }
4361 return;
4362 }
4363
4364 /**
4365 * lpfc_els_retry_delay - Timer function with an ndlp delayed function timer
4366 * @t: pointer to the timer function associated data (ndlp).
4367 * 4368 * This routine is invoked by the ndlp delayed-function timer to check 4369 * whether there is any pending ELS retry event(s) with the node. If not, it 4370 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4371 * adds the delayed events to the HBA work list and invokes the 4372 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4373 * event. Note that lpfc_nlp_get() is called before posting the event to 4374 * the work list to hold reference count of ndlp so that it guarantees the 4375 * reference to ndlp will still be available when the worker thread gets 4376 * to the event associated with the ndlp. 4377 **/ 4378 void 4379 lpfc_els_retry_delay(struct timer_list *t) 4380 { 4381 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4382 struct lpfc_vport *vport = ndlp->vport; 4383 struct lpfc_hba *phba = vport->phba; 4384 unsigned long flags; 4385 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4386 4387 /* Hold a node reference for outstanding queued work */ 4388 if (!lpfc_nlp_get(ndlp)) 4389 return; 4390 4391 spin_lock_irqsave(&phba->hbalock, flags); 4392 if (!list_empty(&evtp->evt_listp)) { 4393 spin_unlock_irqrestore(&phba->hbalock, flags); 4394 lpfc_nlp_put(ndlp); 4395 return; 4396 } 4397 4398 evtp->evt_arg1 = ndlp; 4399 evtp->evt = LPFC_EVT_ELS_RETRY; 4400 list_add_tail(&evtp->evt_listp, &phba->work_list); 4401 spin_unlock_irqrestore(&phba->hbalock, flags); 4402 4403 lpfc_worker_wake_up(phba); 4404 } 4405 4406 /** 4407 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4408 * @ndlp: pointer to a node-list data structure. 4409 * 4410 * This routine is the worker-thread handler for processing the @ndlp delayed 4411 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves 4412 * the last ELS command from the associated ndlp and invokes the proper ELS 4413 * function according to the delayed ELS command to retry the command. 4414 **/ 4415 void 4416 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4417 { 4418 struct lpfc_vport *vport = ndlp->vport; 4419 uint32_t cmd, retry; 4420 4421 spin_lock_irq(&ndlp->lock); 4422 cmd = ndlp->nlp_last_elscmd; 4423 ndlp->nlp_last_elscmd = 0; 4424 spin_unlock_irq(&ndlp->lock); 4425 4426 if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) 4427 return; 4428 4429 /* 4430 * If a discovery event readded nlp_delayfunc after timer 4431 * firing and before processing the timer, cancel the 4432 * nlp_delayfunc. 
4433 */ 4434 del_timer_sync(&ndlp->nlp_delayfunc); 4435 retry = ndlp->nlp_retry; 4436 ndlp->nlp_retry = 0; 4437 4438 switch (cmd) { 4439 case ELS_CMD_FLOGI: 4440 lpfc_issue_els_flogi(vport, ndlp, retry); 4441 break; 4442 case ELS_CMD_PLOGI: 4443 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4444 ndlp->nlp_prev_state = ndlp->nlp_state; 4445 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4446 } 4447 break; 4448 case ELS_CMD_ADISC: 4449 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4450 ndlp->nlp_prev_state = ndlp->nlp_state; 4451 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4452 } 4453 break; 4454 case ELS_CMD_PRLI: 4455 case ELS_CMD_NVMEPRLI: 4456 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4457 ndlp->nlp_prev_state = ndlp->nlp_state; 4458 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4459 } 4460 break; 4461 case ELS_CMD_LOGO: 4462 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4463 ndlp->nlp_prev_state = ndlp->nlp_state; 4464 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4465 } 4466 break; 4467 case ELS_CMD_FDISC: 4468 if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 4469 lpfc_issue_els_fdisc(vport, ndlp, retry); 4470 break; 4471 } 4472 return; 4473 } 4474 4475 /** 4476 * lpfc_link_reset - Issue link reset 4477 * @vport: pointer to a virtual N_Port data structure. 4478 * 4479 * This routine performs link reset by sending INIT_LINK mailbox command. 4480 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4481 * INIT_LINK mailbox command. 4482 * 4483 * Return code 4484 * 0 - Link reset initiated successfully 4485 * 1 - Failed to initiate link reset 4486 **/ 4487 int 4488 lpfc_link_reset(struct lpfc_vport *vport) 4489 { 4490 struct lpfc_hba *phba = vport->phba; 4491 LPFC_MBOXQ_t *mbox; 4492 uint32_t control; 4493 int rc; 4494 4495 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4496 "2851 Attempt link reset\n"); 4497 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4498 if (!mbox) { 4499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4500 "2852 Failed to allocate mbox memory"); 4501 return 1; 4502 } 4503 4504 /* Enable Link attention interrupts */ 4505 if (phba->sli_rev <= LPFC_SLI_REV3) { 4506 spin_lock_irq(&phba->hbalock); 4507 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4508 control = readl(phba->HCregaddr); 4509 control |= HC_LAINT_ENA; 4510 writel(control, phba->HCregaddr); 4511 readl(phba->HCregaddr); /* flush */ 4512 spin_unlock_irq(&phba->hbalock); 4513 } 4514 4515 lpfc_init_link(phba, mbox, phba->cfg_topology, 4516 phba->cfg_link_speed); 4517 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4518 mbox->vport = vport; 4519 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4520 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4522 "2853 Failed to issue INIT_LINK " 4523 "mbox command, rc:x%x\n", rc); 4524 mempool_free(mbox, phba->mbox_mem_pool); 4525 return 1; 4526 } 4527 4528 return 0; 4529 } 4530 4531 /** 4532 * lpfc_els_retry - Make retry decision on an els command iocb 4533 * @phba: pointer to lpfc hba data structure. 4534 * @cmdiocb: pointer to lpfc command iocb data structure. 4535 * @rspiocb: pointer to lpfc response iocb data structure. 4536 * 4537 * This routine makes a retry decision on an ELS command IOCB, which has 4538 * failed. The following ELS IOCBs use this function for retrying the command 4539 * when previously issued command responsed with error status: FLOGI, PLOGI, 4540 * PRLI, ADISC and FDISC. 
Based on the ELS command type and the
4541 * returned error status, it makes the decision whether a retry shall be
4542 * issued for the command, and whether a retry shall be made immediately or
4543 * delayed. In the former case, the corresponding ELS command issuing-function
4544 * is called to retry the command. In the latter case, the ELS command shall
4545 * be posted to the ndlp delayed event and delayed function timer set to the
4546 * ndlp for the delayed command issuing.
4547 *
4548 * Return code
4549 * 0 - No retry of els command is made
4550 * 1 - Immediate or delayed retry of els command is made
4551 **/
4552 static int
4553 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4554 struct lpfc_iocbq *rspiocb)
4555 {
4556 struct lpfc_vport *vport = cmdiocb->vport;
4557 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4558 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4559 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4560 uint32_t *elscmd;
4561 struct ls_rjt stat;
4562 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4563 int logerr = 0;
4564 uint32_t cmd = 0;
4565 uint32_t did;
4566 int link_reset = 0, rc;
4567 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4568 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4569 u8 rsn_code_exp = 0;
4570
4571
4572 /* Note: cmd_dmabuf may be 0 for internal driver abort
4573 * of a delayed ELS command.
4574 */
4575
4576 if (pcmd && pcmd->virt) {
4577 elscmd = (uint32_t *) (pcmd->virt);
4578 cmd = *elscmd++;
4579 }
4580
4581 if (ndlp)
4582 did = ndlp->nlp_DID;
4583 else {
4584 /* We should only hit this case for retrying PLOGI */
4585 did = get_job_els_rsp64_did(phba, rspiocb);
4586 ndlp = lpfc_findnode_did(vport, did);
4587 if (!ndlp && (cmd != ELS_CMD_PLOGI))
4588 return 0;
4589 }
4590
4591 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4592 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
4593 *(((uint32_t *)irsp) + 7), ulp_word4, did);
4594
4595 switch (ulp_status) {
4596 case IOSTAT_FCP_RSP_ERROR:
4597 break;
4598 case IOSTAT_REMOTE_STOP:
4599 if (phba->sli_rev == LPFC_SLI_REV4) {
4600 /* This IO was aborted by the target; we don't
4601 * know the rxid and because we did not send the
4602 * ABTS we cannot generate an RRQ.
4603 */
4604 lpfc_set_rrq_active(phba, ndlp,
4605 cmdiocb->sli4_lxritag, 0, 0);
4606 }
4607 break;
4608 case IOSTAT_LOCAL_REJECT:
4609 switch ((ulp_word4 & IOERR_PARAM_MASK)) {
4610 case IOERR_LOOP_OPEN_FAILURE:
4611 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4612 delay = 1000;
4613 retry = 1;
4614 break;
4615
4616 case IOERR_ILLEGAL_COMMAND:
4617 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4618 "0124 Retry illegal cmd x%x "
4619 "retry:x%x delay:x%x\n",
4620 cmd, cmdiocb->retry, delay);
4621 retry = 1;
4622 /* All commands' retry policy */
4623 maxretry = 8;
4624 if (cmdiocb->retry > 2)
4625 delay = 1000;
4626 break;
4627
4628 case IOERR_NO_RESOURCES:
4629 logerr = 1; /* HBA out of resources */
4630 retry = 1;
4631 if (cmdiocb->retry > 100)
4632 delay = 100;
4633 maxretry = 250;
4634 break;
4635
4636 case IOERR_ILLEGAL_FRAME:
4637 delay = 100;
4638 retry = 1;
4639 break;
4640
4641 case IOERR_INVALID_RPI:
4642 if (cmd == ELS_CMD_PLOGI &&
4643 did == NameServer_DID) {
4644 /* Continue forever if plogi to */
4645 /* the nameserver fails */
4646 maxretry = 0;
4647 delay = 100;
4648 } else if (cmd == ELS_CMD_PRLI &&
4649 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
4650 /* State-command disagreement. The PRLI
4651 * failed with an invalid RPI, meaning there
4652 * was some unexpected state change. Don't retry.
4653 */ 4654 maxretry = 0; 4655 retry = 0; 4656 break; 4657 } 4658 retry = 1; 4659 break; 4660 4661 case IOERR_SEQUENCE_TIMEOUT: 4662 if (cmd == ELS_CMD_PLOGI && 4663 did == NameServer_DID && 4664 (cmdiocb->retry + 1) == maxretry) { 4665 /* Reset the Link */ 4666 link_reset = 1; 4667 break; 4668 } 4669 retry = 1; 4670 delay = 100; 4671 break; 4672 case IOERR_SLI_ABORTED: 4673 /* Retry ELS PLOGI command? 4674 * Possibly the rport just wasn't ready. 4675 */ 4676 if (cmd == ELS_CMD_PLOGI) { 4677 /* No retry if state change */ 4678 if (ndlp && 4679 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4680 goto out_retry; 4681 retry = 1; 4682 maxretry = 2; 4683 } 4684 break; 4685 } 4686 break; 4687 4688 case IOSTAT_NPORT_RJT: 4689 case IOSTAT_FABRIC_RJT: 4690 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4691 retry = 1; 4692 break; 4693 } 4694 break; 4695 4696 case IOSTAT_NPORT_BSY: 4697 case IOSTAT_FABRIC_BSY: 4698 logerr = 1; /* Fabric / Remote NPort out of resources */ 4699 retry = 1; 4700 break; 4701 4702 case IOSTAT_LS_RJT: 4703 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4704 /* Added for Vendor specifc support 4705 * Just keep retrying for these Rsn / Exp codes 4706 */ 4707 if (test_bit(FC_PT2PT, &vport->fc_flag) && 4708 cmd == ELS_CMD_NVMEPRLI) { 4709 switch (stat.un.b.lsRjtRsnCode) { 4710 case LSRJT_UNABLE_TPC: 4711 case LSRJT_INVALID_CMD: 4712 case LSRJT_LOGICAL_ERR: 4713 case LSRJT_CMD_UNSUPPORTED: 4714 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4715 "0168 NVME PRLI LS_RJT " 4716 "reason %x port doesn't " 4717 "support NVME, disabling NVME\n", 4718 stat.un.b.lsRjtRsnCode); 4719 retry = 0; 4720 set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag); 4721 goto out_retry; 4722 } 4723 } 4724 switch (stat.un.b.lsRjtRsnCode) { 4725 case LSRJT_UNABLE_TPC: 4726 /* Special case for PRLI LS_RJTs. Recall that lpfc 4727 * uses a single routine to issue both PRLI FC4 types. 4728 * If the PRLI is rejected because that FC4 type 4729 * isn't really supported, don't retry and cause 4730 * multiple transport registrations. Otherwise, parse 4731 * the reason code/reason code explanation and take the 4732 * appropriate action. 4733 */ 4734 lpfc_printf_vlog(vport, KERN_INFO, 4735 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4736 "0153 ELS cmd x%x LS_RJT by x%x. " 4737 "RsnCode x%x RsnCodeExp x%x\n", 4738 cmd, did, stat.un.b.lsRjtRsnCode, 4739 stat.un.b.lsRjtRsnCodeExp); 4740 4741 switch (stat.un.b.lsRjtRsnCodeExp) { 4742 case LSEXP_CANT_GIVE_DATA: 4743 case LSEXP_CMD_IN_PROGRESS: 4744 if (cmd == ELS_CMD_PLOGI) { 4745 delay = 1000; 4746 maxretry = 48; 4747 } 4748 retry = 1; 4749 break; 4750 case LSEXP_REQ_UNSUPPORTED: 4751 case LSEXP_NO_RSRC_ASSIGN: 4752 /* These explanation codes get no retry. */ 4753 if (cmd == ELS_CMD_PRLI || 4754 cmd == ELS_CMD_NVMEPRLI) 4755 break; 4756 fallthrough; 4757 default: 4758 /* Limit the delay and retry action to a limited 4759 * cmd set. There are other ELS commands where 4760 * a retry is not expected. 4761 */ 4762 if (cmd == ELS_CMD_PLOGI || 4763 cmd == ELS_CMD_PRLI || 4764 cmd == ELS_CMD_NVMEPRLI) { 4765 delay = 1000; 4766 maxretry = lpfc_max_els_tries + 1; 4767 retry = 1; 4768 } 4769 break; 4770 } 4771 4772 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4773 (cmd == ELS_CMD_FDISC) && 4774 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4775 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4776 "0125 FDISC (x%x). 
" 4777 "Fabric out of resources\n", 4778 stat.un.lsRjtError); 4779 lpfc_vport_set_state(vport, 4780 FC_VPORT_NO_FABRIC_RSCS); 4781 } 4782 break; 4783 4784 case LSRJT_LOGICAL_BSY: 4785 rsn_code_exp = stat.un.b.lsRjtRsnCodeExp; 4786 if ((cmd == ELS_CMD_PLOGI) || 4787 (cmd == ELS_CMD_PRLI) || 4788 (cmd == ELS_CMD_NVMEPRLI)) { 4789 delay = 1000; 4790 maxretry = 48; 4791 4792 /* An authentication LS_RJT reason code 4793 * explanation means some error in the 4794 * security settings end-to-end. Reduce 4795 * the retry count to allow lpfc to clear 4796 * RSCN mode and not race with dev_loss. 4797 */ 4798 if (cmd == ELS_CMD_PLOGI && 4799 rsn_code_exp == LSEXP_AUTH_REQ) 4800 maxretry = 8; 4801 } else if (cmd == ELS_CMD_FDISC) { 4802 /* FDISC retry policy */ 4803 maxretry = 48; 4804 if (cmdiocb->retry >= 32) 4805 delay = 1000; 4806 } 4807 retry = 1; 4808 break; 4809 4810 case LSRJT_LOGICAL_ERR: 4811 /* There are some cases where switches return this 4812 * error when they are not ready and should be returning 4813 * Logical Busy. We should delay every time. 4814 */ 4815 if (cmd == ELS_CMD_FDISC && 4816 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4817 maxretry = 3; 4818 delay = 1000; 4819 retry = 1; 4820 } else if (cmd == ELS_CMD_FLOGI && 4821 stat.un.b.lsRjtRsnCodeExp == 4822 LSEXP_NOTHING_MORE) { 4823 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4824 retry = 1; 4825 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4826 "0820 FLOGI (x%x). " 4827 "BBCredit Not Supported\n", 4828 stat.un.lsRjtError); 4829 } else if (cmd == ELS_CMD_PLOGI) { 4830 rsn_code_exp = stat.un.b.lsRjtRsnCodeExp; 4831 4832 /* An authentication LS_RJT reason code 4833 * explanation means some error in the 4834 * security settings end-to-end. Reduce 4835 * the retry count to allow lpfc to clear 4836 * RSCN mode and not race with dev_loss. 4837 */ 4838 if (rsn_code_exp == LSEXP_AUTH_REQ) { 4839 delay = 1000; 4840 retry = 1; 4841 maxretry = 8; 4842 } 4843 } 4844 break; 4845 4846 case LSRJT_PROTOCOL_ERR: 4847 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4848 (cmd == ELS_CMD_FDISC) && 4849 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4850 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4851 ) { 4852 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 4853 "0122 FDISC (x%x). " 4854 "Fabric Detected Bad WWN\n", 4855 stat.un.lsRjtError); 4856 lpfc_vport_set_state(vport, 4857 FC_VPORT_FABRIC_REJ_WWN); 4858 } 4859 break; 4860 case LSRJT_VENDOR_UNIQUE: 4861 if ((stat.un.b.vendorUnique == 0x45) && 4862 (cmd == ELS_CMD_FLOGI)) { 4863 goto out_retry; 4864 } 4865 break; 4866 case LSRJT_CMD_UNSUPPORTED: 4867 /* lpfc nvmet returns this type of LS_RJT when it 4868 * receives an FCP PRLI because lpfc nvmet only 4869 * support NVME. ELS request is terminated for FCP4 4870 * on this rport. 4871 */ 4872 if (stat.un.b.lsRjtRsnCodeExp == 4873 LSEXP_REQ_UNSUPPORTED) { 4874 if (cmd == ELS_CMD_PRLI) 4875 goto out_retry; 4876 } 4877 break; 4878 } 4879 break; 4880 4881 case IOSTAT_INTERMED_RSP: 4882 case IOSTAT_BA_RJT: 4883 break; 4884 4885 default: 4886 break; 4887 } 4888 4889 if (link_reset) { 4890 rc = lpfc_link_reset(vport); 4891 if (rc) { 4892 /* Do not give up. Retry PLOGI one more time and attempt 4893 * link reset if PLOGI fails again. 
4894 */ 4895 retry = 1; 4896 delay = 100; 4897 goto out_retry; 4898 } 4899 return 1; 4900 } 4901 4902 if (did == FDMI_DID) 4903 retry = 1; 4904 4905 if ((cmd == ELS_CMD_FLOGI) && 4906 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4907 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4908 /* FLOGI retry policy */ 4909 retry = 1; 4910 /* retry FLOGI forever */ 4911 if (phba->link_flag != LS_LOOPBACK_MODE) 4912 maxretry = 0; 4913 else 4914 maxretry = 2; 4915 4916 if (cmdiocb->retry >= 100) 4917 delay = 5000; 4918 else if (cmdiocb->retry >= 32) 4919 delay = 1000; 4920 } else if ((cmd == ELS_CMD_FDISC) && 4921 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4922 /* retry FDISCs every second up to devloss */ 4923 retry = 1; 4924 maxretry = vport->cfg_devloss_tmo; 4925 delay = 1000; 4926 } 4927 4928 cmdiocb->retry++; 4929 if (maxretry && (cmdiocb->retry >= maxretry)) { 4930 phba->fc_stat.elsRetryExceeded++; 4931 retry = 0; 4932 } 4933 4934 if (test_bit(FC_UNLOADING, &vport->load_flag)) 4935 retry = 0; 4936 4937 out_retry: 4938 if (retry) { 4939 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4940 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4941 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4943 "2849 Stop retry ELS command " 4944 "x%x to remote NPORT x%x, " 4945 "Data: x%x x%x\n", cmd, did, 4946 cmdiocb->retry, delay); 4947 return 0; 4948 } 4949 } 4950 4951 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4952 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4953 "0107 Retry ELS command x%x to remote " 4954 "NPORT x%x Data: x%x x%x\n", 4955 cmd, did, cmdiocb->retry, delay); 4956 4957 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4958 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4959 ((ulp_word4 & IOERR_PARAM_MASK) != 4960 IOERR_NO_RESOURCES))) { 4961 /* Don't reset timer for no resources */ 4962 4963 /* If discovery / RSCN timer is running, reset it */ 4964 if (timer_pending(&vport->fc_disctmo) || 4965 test_bit(FC_RSCN_MODE, &vport->fc_flag)) 4966 lpfc_set_disctmo(vport); 4967 } 4968 4969 phba->fc_stat.elsXmitRetry++; 4970 if (ndlp && delay) { 4971 phba->fc_stat.elsDelayRetry++; 4972 ndlp->nlp_retry = cmdiocb->retry; 4973 4974 /* delay is specified in milliseconds */ 4975 mod_timer(&ndlp->nlp_delayfunc, 4976 jiffies + msecs_to_jiffies(delay)); 4977 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); 4978 4979 ndlp->nlp_prev_state = ndlp->nlp_state; 4980 if ((cmd == ELS_CMD_PRLI) || 4981 (cmd == ELS_CMD_NVMEPRLI)) 4982 lpfc_nlp_set_state(vport, ndlp, 4983 NLP_STE_PRLI_ISSUE); 4984 else if (cmd != ELS_CMD_ADISC) 4985 lpfc_nlp_set_state(vport, ndlp, 4986 NLP_STE_NPR_NODE); 4987 ndlp->nlp_last_elscmd = cmd; 4988 4989 return 1; 4990 } 4991 switch (cmd) { 4992 case ELS_CMD_FLOGI: 4993 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4994 return 1; 4995 case ELS_CMD_FDISC: 4996 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4997 return 1; 4998 case ELS_CMD_PLOGI: 4999 if (ndlp) { 5000 ndlp->nlp_prev_state = ndlp->nlp_state; 5001 lpfc_nlp_set_state(vport, ndlp, 5002 NLP_STE_PLOGI_ISSUE); 5003 } 5004 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5005 return 1; 5006 case ELS_CMD_ADISC: 5007 ndlp->nlp_prev_state = ndlp->nlp_state; 5008 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5009 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5010 return 1; 5011 case ELS_CMD_PRLI: 5012 case ELS_CMD_NVMEPRLI: 5013 ndlp->nlp_prev_state = ndlp->nlp_state; 5014 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5015 
lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5016 return 1; 5017 case ELS_CMD_LOGO: 5018 ndlp->nlp_prev_state = ndlp->nlp_state; 5019 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5020 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5021 return 1; 5022 } 5023 } 5024 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5025 if (logerr) { 5026 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5027 "0137 No retry ELS command x%x to remote " 5028 "NPORT x%x: Out of Resources: Error:x%x/%x " 5029 "IoTag x%x\n", 5030 cmd, did, ulp_status, ulp_word4, 5031 cmdiocb->iotag); 5032 } 5033 else { 5034 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5035 "0108 No retry ELS command x%x to remote " 5036 "NPORT x%x Retried:%d Error:x%x/%x " 5037 "IoTag x%x nflags x%lx\n", 5038 cmd, did, cmdiocb->retry, ulp_status, 5039 ulp_word4, cmdiocb->iotag, 5040 (ndlp ? ndlp->nlp_flag : 0)); 5041 } 5042 return 0; 5043 } 5044 5045 /** 5046 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5047 * @phba: pointer to lpfc hba data structure. 5048 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5049 * 5050 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5051 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5052 * checks to see whether there is a lpfc DMA buffer associated with the 5053 * response of the command IOCB. If so, it will be released before releasing 5054 * the lpfc DMA buffer associated with the IOCB itself. 5055 * 5056 * Return code 5057 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5058 **/ 5059 static int 5060 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5061 { 5062 struct lpfc_dmabuf *buf_ptr; 5063 5064 /* Free the response before processing the command. */ 5065 if (!list_empty(&buf_ptr1->list)) { 5066 list_remove_head(&buf_ptr1->list, buf_ptr, 5067 struct lpfc_dmabuf, 5068 list); 5069 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5070 kfree(buf_ptr); 5071 } 5072 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5073 kfree(buf_ptr1); 5074 return 0; 5075 } 5076 5077 /** 5078 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5079 * @phba: pointer to lpfc hba data structure. 5080 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5081 * 5082 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5083 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5084 * pool. 5085 * 5086 * Return code 5087 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5088 **/ 5089 static int 5090 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5091 { 5092 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5093 kfree(buf_ptr); 5094 return 0; 5095 } 5096 5097 /** 5098 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5099 * @phba: pointer to lpfc hba data structure. 5100 * @elsiocb: pointer to lpfc els command iocb data structure. 5101 * 5102 * This routine frees a command IOCB and its associated resources. The 5103 * command IOCB data structure contains the reference to various associated 5104 * resources, these fields must be set to NULL if the associated reference 5105 * not present: 5106 * cmd_dmabuf - reference to cmd. 
5107 * cmd_dmabuf->next - reference to rsp
5108 * rsp_dmabuf - unused
5109 * bpl_dmabuf - reference to bpl
5110 *
5111 * It first properly decrements the reference count held on ndlp for the
5112 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
5113 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5114 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5115 * adds the DMA buffer to the @phba data structure for the delayed release.
5116 * If a reference to the Buffer Pointer List (BPL) is present, the
5117 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5118 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
5119 * invoked to release the IOCB data structure back to @phba IOCBQ list.
5120 *
5121 * Return code
5122 * 0 - Success (currently, always return 0)
5123 **/
5124 int
5125 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5126 {
5127 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5128
5129 /* The I/O iocb is complete. Clear the node and first dmabuf */
5130 elsiocb->ndlp = NULL;
5131
5132 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5133 if (elsiocb->cmd_dmabuf) {
5134 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5135 /* Firmware could still be in progress of DMAing
5136 * payload, so don't free data buffer till after
5137 * a hbeat.
5138 */
5139 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5140 buf_ptr = elsiocb->cmd_dmabuf;
5141 elsiocb->cmd_dmabuf = NULL;
5142 if (buf_ptr) {
5143 buf_ptr1 = NULL;
5144 spin_lock_irq(&phba->hbalock);
5145 if (!list_empty(&buf_ptr->list)) {
5146 list_remove_head(&buf_ptr->list,
5147 buf_ptr1, struct lpfc_dmabuf,
5148 list);
5149 INIT_LIST_HEAD(&buf_ptr1->list);
5150 list_add_tail(&buf_ptr1->list,
5151 &phba->elsbuf);
5152 phba->elsbuf_cnt++;
5153 }
5154 INIT_LIST_HEAD(&buf_ptr->list);
5155 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5156 phba->elsbuf_cnt++;
5157 spin_unlock_irq(&phba->hbalock);
5158 }
5159 } else {
5160 buf_ptr1 = elsiocb->cmd_dmabuf;
5161 lpfc_els_free_data(phba, buf_ptr1);
5162 elsiocb->cmd_dmabuf = NULL;
5163 }
5164 }
5165
5166 if (elsiocb->bpl_dmabuf) {
5167 buf_ptr = elsiocb->bpl_dmabuf;
5168 lpfc_els_free_bpl(phba, buf_ptr);
5169 elsiocb->bpl_dmabuf = NULL;
5170 }
5171 lpfc_sli_release_iocbq(phba, elsiocb);
5172 return 0;
5173 }
5174
5175 /**
5176 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5177 * @phba: pointer to lpfc hba data structure.
5178 * @cmdiocb: pointer to lpfc command iocb data structure.
5179 * @rspiocb: pointer to lpfc response iocb data structure.
5180 *
5181 * This routine is the completion callback function to the Logout (LOGO)
5182 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5183 * the completion of the LOGO process. If the node has transitioned to NPR,
5184 * this routine unregisters the RPI if it is still registered. The
5185 * lpfc_els_free_iocb() routine is invoked to release the IOCB data structure.
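 *
 * For NPR nodes whose last ELS command was a PRLO, the routine also
 * restarts discovery immediately by issuing a new PLOGI.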
5186 **/ 5187 static void 5188 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5189 struct lpfc_iocbq *rspiocb) 5190 { 5191 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5192 struct lpfc_vport *vport = cmdiocb->vport; 5193 u32 ulp_status, ulp_word4; 5194 5195 ulp_status = get_job_ulpstatus(phba, rspiocb); 5196 ulp_word4 = get_job_word4(phba, rspiocb); 5197 5198 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5199 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5200 ulp_status, ulp_word4, ndlp->nlp_DID); 5201 /* ACC to LOGO completes to NPort <nlp_DID> */ 5202 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5203 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5204 "last els x%x Data: x%lx x%x x%x\n", 5205 ndlp->nlp_DID, kref_read(&ndlp->kref), 5206 ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, 5207 ndlp->nlp_rpi); 5208 5209 /* This clause allows the LOGO ACC to complete and free resources 5210 * for the Fabric Domain Controller. It does deliberately skip 5211 * the unreg_rpi and release rpi because some fabrics send RDP 5212 * requests after logging out from the initiator. 5213 */ 5214 if (ndlp->nlp_type & NLP_FABRIC && 5215 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5216 goto out; 5217 5218 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5219 if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) 5220 lpfc_unreg_rpi(vport, ndlp); 5221 5222 /* If came from PRLO, then PRLO_ACC is done. 5223 * Start rediscovery now. 5224 */ 5225 if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { 5226 set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); 5227 ndlp->nlp_prev_state = ndlp->nlp_state; 5228 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5229 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5230 } 5231 } 5232 5233 out: 5234 /* 5235 * The driver received a LOGO from the rport and has ACK'd it. 5236 * At this point, the driver is done so release the IOCB 5237 */ 5238 lpfc_els_free_iocb(phba, cmdiocb); 5239 lpfc_nlp_put(ndlp); 5240 } 5241 5242 /** 5243 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5244 * @phba: pointer to lpfc hba data structure. 5245 * @pmb: pointer to the driver internal queue element for mailbox command. 5246 * 5247 * This routine is the completion callback function for unregister default 5248 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5249 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5250 * decrements the ndlp reference count held for this completion callback 5251 * function. After that, it invokes the lpfc_drop_node to check 5252 * whether it is appropriate to release the node. 5253 **/ 5254 void 5255 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5256 { 5257 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5258 u32 mbx_flag = pmb->mbox_flag; 5259 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5260 5261 if (ndlp) { 5262 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5263 "0006 rpi x%x DID:%x flg:%lx %d x%px " 5264 "mbx_cmd x%x mbx_flag x%x x%px\n", 5265 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5266 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5267 mbx_flag, pmb); 5268 5269 /* This ends the default/temporary RPI cleanup logic for this 5270 * ndlp and the node and rpi needs to be released. Free the rpi 5271 * first on an UNREG_LOGIN and then release the final 5272 * references. 
5273 */ 5274 clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); 5275 if (mbx_cmd == MBX_UNREG_LOGIN) 5276 clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); 5277 lpfc_nlp_put(ndlp); 5278 lpfc_drop_node(ndlp->vport, ndlp); 5279 } 5280 5281 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5282 } 5283 5284 /** 5285 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5286 * @phba: pointer to lpfc hba data structure. 5287 * @cmdiocb: pointer to lpfc command iocb data structure. 5288 * @rspiocb: pointer to lpfc response iocb data structure. 5289 * 5290 * This routine is the completion callback function for ELS Response IOCB 5291 * command. In normal case, this callback function just properly sets the 5292 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5293 * field in the command IOCB is not NULL, the referred mailbox command will 5294 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5295 * the IOCB. 5296 **/ 5297 static void 5298 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5299 struct lpfc_iocbq *rspiocb) 5300 { 5301 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5302 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5303 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 5304 IOCB_t *irsp; 5305 LPFC_MBOXQ_t *mbox = NULL; 5306 u32 ulp_status, ulp_word4, tmo, did, iotag; 5307 5308 if (!vport) { 5309 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 5310 "3177 null vport in ELS rsp\n"); 5311 goto out; 5312 } 5313 if (cmdiocb->context_un.mbox) 5314 mbox = cmdiocb->context_un.mbox; 5315 5316 ulp_status = get_job_ulpstatus(phba, rspiocb); 5317 ulp_word4 = get_job_word4(phba, rspiocb); 5318 did = get_job_els_rsp64_did(phba, cmdiocb); 5319 5320 if (phba->sli_rev == LPFC_SLI_REV4) { 5321 tmo = get_wqe_tmo(cmdiocb); 5322 iotag = get_wqe_reqtag(cmdiocb); 5323 } else { 5324 irsp = &rspiocb->iocb; 5325 tmo = irsp->ulpTimeout; 5326 iotag = irsp->ulpIoTag; 5327 } 5328 5329 /* Check to see if link went down during discovery */ 5330 if (!ndlp || lpfc_els_chk_latt(vport)) { 5331 if (mbox) 5332 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5333 goto out; 5334 } 5335 5336 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5337 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5338 ulp_status, ulp_word4, did); 5339 /* ELS response tag <ulpIoTag> completes */ 5340 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5341 "0110 ELS response tag x%x completes " 5342 "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", 5343 iotag, ulp_status, ulp_word4, tmo, 5344 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5345 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5346 if (mbox) { 5347 if (ulp_status == 0 && 5348 test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { 5349 if (!lpfc_unreg_rpi(vport, ndlp) && 5350 !test_bit(FC_PT2PT, &vport->fc_flag)) { 5351 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5352 ndlp->nlp_state == 5353 NLP_STE_REG_LOGIN_ISSUE) { 5354 lpfc_printf_vlog(vport, KERN_INFO, 5355 LOG_DISCOVERY, 5356 "0314 PLOGI recov " 5357 "DID x%x " 5358 "Data: x%x x%x x%lx\n", 5359 ndlp->nlp_DID, 5360 ndlp->nlp_state, 5361 ndlp->nlp_rpi, 5362 ndlp->nlp_flag); 5363 goto out_free_mbox; 5364 } 5365 } 5366 5367 /* Increment reference count to ndlp to hold the 5368 * reference to ndlp for the callback function. 
5369 */ 5370 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5371 if (!mbox->ctx_ndlp) 5372 goto out_free_mbox; 5373 5374 mbox->vport = vport; 5375 if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { 5376 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5377 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5378 } else { 5379 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5380 ndlp->nlp_prev_state = ndlp->nlp_state; 5381 lpfc_nlp_set_state(vport, ndlp, 5382 NLP_STE_REG_LOGIN_ISSUE); 5383 } 5384 5385 set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); 5386 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5387 != MBX_NOT_FINISHED) 5388 goto out; 5389 5390 /* Decrement the ndlp reference count we 5391 * set for this failed mailbox command. 5392 */ 5393 lpfc_nlp_put(ndlp); 5394 clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); 5395 5396 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5397 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5398 "0138 ELS rsp: Cannot issue reg_login for x%x " 5399 "Data: x%lx x%x x%x\n", 5400 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5401 ndlp->nlp_rpi); 5402 } 5403 out_free_mbox: 5404 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5405 } 5406 out: 5407 if (ndlp && shost) { 5408 if (mbox) 5409 clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); 5410 clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); 5411 } 5412 5413 /* An SLI4 NPIV instance wants to drop the node at this point under 5414 * these conditions because it doesn't need the login. 5415 */ 5416 if (phba->sli_rev == LPFC_SLI_REV4 && 5417 vport && vport->port_type == LPFC_NPIV_PORT && 5418 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5419 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5420 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && 5421 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 5422 /* Drop ndlp if there is no planned or outstanding 5423 * issued PRLI. 5424 * 5425 * In cases when the ndlp is acting as both an initiator 5426 * and target function, let our issued PRLI determine 5427 * the final ndlp kref drop. 5428 */ 5429 lpfc_drop_node(vport, ndlp); 5430 } 5431 } 5432 5433 /* Release the originating I/O reference. */ 5434 lpfc_els_free_iocb(phba, cmdiocb); 5435 lpfc_nlp_put(ndlp); 5436 return; 5437 } 5438 5439 /** 5440 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5441 * @vport: pointer to a host virtual N_Port data structure. 5442 * @flag: the els command code to be accepted. 5443 * @oldiocb: pointer to the original lpfc command iocb data structure. 5444 * @ndlp: pointer to a node-list data structure. 5445 * @mbox: pointer to the driver internal queue element for mailbox command. 5446 * 5447 * This routine prepares and issues an Accept (ACC) response IOCB 5448 * command. It uses the @flag to properly set up the IOCB field for the 5449 * specific ACC response command to be issued and invokes the 5450 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5451 * @mbox pointer is passed in, it will be put into the context_un.mbox 5452 * field of the IOCB for the completion callback function to issue the 5453 * mailbox command to the HBA later when callback is invoked. 5454 * 5455 * Note that the ndlp reference count will be incremented by 1 for holding the 5456 * ndlp and the reference to ndlp will be stored into the ndlp field of 5457 * the IOCB for the completion callback function to the corresponding 5458 * response ELS IOCB command. 
5459 * 5460 * Return code 5461 * 0 - Successfully issued acc response 5462 * 1 - Failed to issue acc response 5463 **/ 5464 int 5465 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5466 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5467 LPFC_MBOXQ_t *mbox) 5468 { 5469 struct lpfc_hba *phba = vport->phba; 5470 IOCB_t *icmd; 5471 IOCB_t *oldcmd; 5472 union lpfc_wqe128 *wqe; 5473 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5474 struct lpfc_iocbq *elsiocb; 5475 uint8_t *pcmd; 5476 struct serv_parm *sp; 5477 uint16_t cmdsize; 5478 int rc; 5479 ELS_PKT *els_pkt_ptr; 5480 struct fc_els_rdf_resp *rdf_resp; 5481 5482 switch (flag) { 5483 case ELS_CMD_ACC: 5484 cmdsize = sizeof(uint32_t); 5485 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5486 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5487 if (!elsiocb) { 5488 clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); 5489 return 1; 5490 } 5491 5492 if (phba->sli_rev == LPFC_SLI_REV4) { 5493 wqe = &elsiocb->wqe; 5494 /* XRI / rx_id */ 5495 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5496 bf_get(wqe_ctxt_tag, 5497 &oldwqe->xmit_els_rsp.wqe_com)); 5498 5499 /* oxid */ 5500 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5501 bf_get(wqe_rcvoxid, 5502 &oldwqe->xmit_els_rsp.wqe_com)); 5503 } else { 5504 icmd = &elsiocb->iocb; 5505 oldcmd = &oldiocb->iocb; 5506 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5507 icmd->unsli3.rcvsli3.ox_id = 5508 oldcmd->unsli3.rcvsli3.ox_id; 5509 } 5510 5511 pcmd = elsiocb->cmd_dmabuf->virt; 5512 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5513 pcmd += sizeof(uint32_t); 5514 5515 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5516 "Issue ACC: did:x%x flg:x%lx", 5517 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5518 break; 5519 case ELS_CMD_FLOGI: 5520 case ELS_CMD_PLOGI: 5521 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5522 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5523 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5524 if (!elsiocb) 5525 return 1; 5526 5527 if (phba->sli_rev == LPFC_SLI_REV4) { 5528 wqe = &elsiocb->wqe; 5529 /* XRI / rx_id */ 5530 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5531 bf_get(wqe_ctxt_tag, 5532 &oldwqe->xmit_els_rsp.wqe_com)); 5533 5534 /* oxid */ 5535 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5536 bf_get(wqe_rcvoxid, 5537 &oldwqe->xmit_els_rsp.wqe_com)); 5538 } else { 5539 icmd = &elsiocb->iocb; 5540 oldcmd = &oldiocb->iocb; 5541 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5542 icmd->unsli3.rcvsli3.ox_id = 5543 oldcmd->unsli3.rcvsli3.ox_id; 5544 } 5545 5546 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5547 5548 if (mbox) 5549 elsiocb->context_un.mbox = mbox; 5550 5551 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5552 pcmd += sizeof(uint32_t); 5553 sp = (struct serv_parm *)pcmd; 5554 5555 if (flag == ELS_CMD_FLOGI) { 5556 /* Copy the received service parameters back */ 5557 memcpy(sp, &phba->fc_fabparam, 5558 sizeof(struct serv_parm)); 5559 5560 /* Clear the F_Port bit */ 5561 sp->cmn.fPort = 0; 5562 5563 /* Mark all class service parameters as invalid */ 5564 sp->cls1.classValid = 0; 5565 sp->cls2.classValid = 0; 5566 sp->cls3.classValid = 0; 5567 sp->cls4.classValid = 0; 5568 5569 /* Copy our worldwide names */ 5570 memcpy(&sp->portName, &vport->fc_sparam.portName, 5571 sizeof(struct lpfc_name)); 5572 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5573 sizeof(struct lpfc_name)); 5574 } else { 5575 memcpy(pcmd, &vport->fc_sparam, 5576 sizeof(struct serv_parm)); 5577 5578 sp->cmn.valid_vendor_ver_level = 0; 5579 
memset(sp->un.vendorVersion, 0, 5580 sizeof(sp->un.vendorVersion)); 5581 sp->cmn.bbRcvSizeMsb &= 0xF; 5582 5583 /* If our firmware supports this feature, convey that 5584 * info to the target using the vendor specific field. 5585 */ 5586 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5587 sp->cmn.valid_vendor_ver_level = 1; 5588 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5589 sp->un.vv.flags = 5590 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5591 } 5592 } 5593 5594 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5595 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", 5596 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5597 break; 5598 case ELS_CMD_PRLO: 5599 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5600 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5601 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5602 if (!elsiocb) 5603 return 1; 5604 5605 if (phba->sli_rev == LPFC_SLI_REV4) { 5606 wqe = &elsiocb->wqe; 5607 /* XRI / rx_id */ 5608 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5609 bf_get(wqe_ctxt_tag, 5610 &oldwqe->xmit_els_rsp.wqe_com)); 5611 5612 /* oxid */ 5613 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5614 bf_get(wqe_rcvoxid, 5615 &oldwqe->xmit_els_rsp.wqe_com)); 5616 } else { 5617 icmd = &elsiocb->iocb; 5618 oldcmd = &oldiocb->iocb; 5619 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5620 icmd->unsli3.rcvsli3.ox_id = 5621 oldcmd->unsli3.rcvsli3.ox_id; 5622 } 5623 5624 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5625 5626 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5627 sizeof(uint32_t) + sizeof(PRLO)); 5628 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5629 els_pkt_ptr = (ELS_PKT *) pcmd; 5630 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5631 5632 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5633 "Issue ACC PRLO: did:x%x flg:x%lx", 5634 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5635 break; 5636 case ELS_CMD_RDF: 5637 cmdsize = sizeof(*rdf_resp); 5638 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5639 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5640 if (!elsiocb) 5641 return 1; 5642 5643 if (phba->sli_rev == LPFC_SLI_REV4) { 5644 wqe = &elsiocb->wqe; 5645 /* XRI / rx_id */ 5646 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5647 bf_get(wqe_ctxt_tag, 5648 &oldwqe->xmit_els_rsp.wqe_com)); 5649 5650 /* oxid */ 5651 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5652 bf_get(wqe_rcvoxid, 5653 &oldwqe->xmit_els_rsp.wqe_com)); 5654 } else { 5655 icmd = &elsiocb->iocb; 5656 oldcmd = &oldiocb->iocb; 5657 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5658 icmd->unsli3.rcvsli3.ox_id = 5659 oldcmd->unsli3.rcvsli3.ox_id; 5660 } 5661 5662 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5663 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5664 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5665 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5666 5667 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5668 rdf_resp->desc_list_len = cpu_to_be32(12); 5669 5670 /* FC-LS-5 specifies LS REQ Information descriptor */ 5671 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5672 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5673 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5674 break; 5675 default: 5676 return 1; 5677 } 5678 if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { 5679 if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && 5680 !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) 5681 clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); 5682 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5683 } else { 5684 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5685 } 5686 5687 phba->fc_stat.elsXmitACC++; 5688 elsiocb->ndlp = 
lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Xmit ELS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
			 "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x "
			 "RPI: x%x, fc_flag x%lx refcnt %d\n",
			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
	return 0;
}

/**
 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @rejectError: reject response to issue
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues a Reject (RJT) response IOCB
 * command. If a @mbox pointer is passed in, it will be put into the
 * context_un.mbox field of the IOCB for the completion callback function
 * to issue to the HBA later.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the reject response
 * ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued reject response
 *   1 - Failed to issue reject response
 **/
int
lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
		    LPFC_MBOXQ_t *mbox)
{
	int rc;
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;

	cmdsize = 2 * sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
	if (!elsiocb)
		return 1;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		wqe = &elsiocb->wqe;
		bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
		       get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       get_job_rcvoxid(phba, oldiocb));
	} else {
		icmd = &elsiocb->iocb;
		oldcmd = &oldiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
	}

	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;

	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
	pcmd += sizeof(uint32_t);
	*((uint32_t *) (pcmd)) = rejectError;

	if (mbox)
		elsiocb->context_un.mbox = mbox;

	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0129 Xmit ELS RJT x%x response tag x%x "
			 "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, "
			 "rpi x%x\n",
			 rejectError, elsiocb->iotag,
			 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			      "Issue LS_RJT: did:x%x flg:x%lx err:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5787 5788 phba->fc_stat.elsXmitLSRJT++; 5789 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5790 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5791 if (!elsiocb->ndlp) { 5792 lpfc_els_free_iocb(phba, elsiocb); 5793 return 1; 5794 } 5795 5796 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5797 if (rc == IOCB_ERROR) { 5798 lpfc_els_free_iocb(phba, elsiocb); 5799 lpfc_nlp_put(ndlp); 5800 return 1; 5801 } 5802 5803 return 0; 5804 } 5805 5806 /** 5807 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5808 * @vport: pointer to a host virtual N_Port data structure. 5809 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5810 * @ndlp: NPort to where rsp is directed 5811 * 5812 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5813 * this N_Port's support of hardware signals in its Congestion 5814 * Capabilities Descriptor. 5815 * 5816 * Return code 5817 * 0 - Successfully issued edc rsp command 5818 * 1 - Failed to issue edc rsp command 5819 **/ 5820 static int 5821 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5822 struct lpfc_nodelist *ndlp) 5823 { 5824 struct lpfc_hba *phba = vport->phba; 5825 struct fc_els_edc_resp *edc_rsp; 5826 struct fc_tlv_desc *tlv; 5827 struct lpfc_iocbq *elsiocb; 5828 IOCB_t *icmd, *cmd; 5829 union lpfc_wqe128 *wqe; 5830 u32 cgn_desc_size, lft_desc_size; 5831 u16 cmdsize; 5832 uint8_t *pcmd; 5833 int rc; 5834 5835 cmdsize = sizeof(struct fc_els_edc_resp); 5836 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5837 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 5838 sizeof(struct fc_diag_lnkflt_desc) : 0; 5839 cmdsize += cgn_desc_size + lft_desc_size; 5840 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5841 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5842 if (!elsiocb) 5843 return 1; 5844 5845 if (phba->sli_rev == LPFC_SLI_REV4) { 5846 wqe = &elsiocb->wqe; 5847 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5848 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5849 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5850 get_job_rcvoxid(phba, cmdiocb)); 5851 } else { 5852 icmd = &elsiocb->iocb; 5853 cmd = &cmdiocb->iocb; 5854 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5855 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5856 } 5857 5858 pcmd = elsiocb->cmd_dmabuf->virt; 5859 memset(pcmd, 0, cmdsize); 5860 5861 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5862 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5863 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5864 cgn_desc_size + lft_desc_size); 5865 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5866 edc_rsp->lsri.desc_len = cpu_to_be32( 5867 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5868 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5869 tlv = edc_rsp->desc; 5870 lpfc_format_edc_cgn_desc(phba, tlv); 5871 tlv = fc_tlv_next_desc(tlv); 5872 if (lft_desc_size) 5873 lpfc_format_edc_lft_desc(phba, tlv); 5874 5875 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5876 "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", 5877 ndlp->nlp_DID, ndlp->nlp_flag, 5878 kref_read(&ndlp->kref)); 5879 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5880 5881 phba->fc_stat.elsXmitACC++; 5882 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5883 if (!elsiocb->ndlp) { 5884 lpfc_els_free_iocb(phba, elsiocb); 5885 return 1; 5886 } 5887 5888 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5889 if (rc == IOCB_ERROR) { 5890 lpfc_els_free_iocb(phba, elsiocb); 5891 
lpfc_nlp_put(ndlp); 5892 return 1; 5893 } 5894 5895 /* Xmit ELS ACC response tag <ulpIoTag> */ 5896 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5897 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5898 "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " 5899 "RPI: x%x, fc_flag x%lx\n", 5900 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5901 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5902 ndlp->nlp_rpi, vport->fc_flag); 5903 5904 return 0; 5905 } 5906 5907 /** 5908 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5909 * @vport: pointer to a virtual N_Port data structure. 5910 * @oldiocb: pointer to the original lpfc command iocb data structure. 5911 * @ndlp: pointer to a node-list data structure. 5912 * 5913 * This routine prepares and issues an Accept (ACC) response to Address 5914 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5915 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5916 * 5917 * Note that the ndlp reference count will be incremented by 1 for holding the 5918 * ndlp and the reference to ndlp will be stored into the ndlp field of 5919 * the IOCB for the completion callback function to the ADISC Accept response 5920 * ELS IOCB command. 5921 * 5922 * Return code 5923 * 0 - Successfully issued acc adisc response 5924 * 1 - Failed to issue adisc acc response 5925 **/ 5926 int 5927 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5928 struct lpfc_nodelist *ndlp) 5929 { 5930 struct lpfc_hba *phba = vport->phba; 5931 ADISC *ap; 5932 IOCB_t *icmd, *oldcmd; 5933 union lpfc_wqe128 *wqe; 5934 struct lpfc_iocbq *elsiocb; 5935 uint8_t *pcmd; 5936 uint16_t cmdsize; 5937 int rc; 5938 u32 ulp_context; 5939 5940 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5941 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5942 ndlp->nlp_DID, ELS_CMD_ACC); 5943 if (!elsiocb) 5944 return 1; 5945 5946 if (phba->sli_rev == LPFC_SLI_REV4) { 5947 wqe = &elsiocb->wqe; 5948 /* XRI / rx_id */ 5949 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5950 get_job_ulpcontext(phba, oldiocb)); 5951 ulp_context = get_job_ulpcontext(phba, elsiocb); 5952 /* oxid */ 5953 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5954 get_job_rcvoxid(phba, oldiocb)); 5955 } else { 5956 icmd = &elsiocb->iocb; 5957 oldcmd = &oldiocb->iocb; 5958 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5959 ulp_context = elsiocb->iocb.ulpContext; 5960 icmd->unsli3.rcvsli3.ox_id = 5961 oldcmd->unsli3.rcvsli3.ox_id; 5962 } 5963 5964 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5965 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5966 "0130 Xmit ADISC ACC response iotag x%x xri: " 5967 "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", 5968 elsiocb->iotag, ulp_context, 5969 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5970 ndlp->nlp_rpi); 5971 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5972 5973 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5974 pcmd += sizeof(uint32_t); 5975 5976 ap = (ADISC *) (pcmd); 5977 ap->hardAL_PA = phba->fc_pref_ALPA; 5978 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5979 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5980 ap->DID = be32_to_cpu(vport->fc_myDID); 5981 5982 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5983 "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", 5984 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5985 5986 phba->fc_stat.elsXmitACC++; 5987 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5988 elsiocb->ndlp = 
lpfc_nlp_get(ndlp); 5989 if (!elsiocb->ndlp) { 5990 lpfc_els_free_iocb(phba, elsiocb); 5991 return 1; 5992 } 5993 5994 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5995 if (rc == IOCB_ERROR) { 5996 lpfc_els_free_iocb(phba, elsiocb); 5997 lpfc_nlp_put(ndlp); 5998 return 1; 5999 } 6000 6001 return 0; 6002 } 6003 6004 /** 6005 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6006 * @vport: pointer to a virtual N_Port data structure. 6007 * @oldiocb: pointer to the original lpfc command iocb data structure. 6008 * @ndlp: pointer to a node-list data structure. 6009 * 6010 * This routine prepares and issues an Accept (ACC) response to Process 6011 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6012 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 6013 * 6014 * Note that the ndlp reference count will be incremented by 1 for holding the 6015 * ndlp and the reference to ndlp will be stored into the ndlp field of 6016 * the IOCB for the completion callback function to the PRLI Accept response 6017 * ELS IOCB command. 6018 * 6019 * Return code 6020 * 0 - Successfully issued acc prli response 6021 * 1 - Failed to issue acc prli response 6022 **/ 6023 int 6024 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6025 struct lpfc_nodelist *ndlp) 6026 { 6027 struct lpfc_hba *phba = vport->phba; 6028 PRLI *npr; 6029 struct lpfc_nvme_prli *npr_nvme; 6030 lpfc_vpd_t *vpd; 6031 IOCB_t *icmd; 6032 IOCB_t *oldcmd; 6033 union lpfc_wqe128 *wqe; 6034 struct lpfc_iocbq *elsiocb; 6035 uint8_t *pcmd; 6036 uint16_t cmdsize; 6037 uint32_t prli_fc4_req, *req_payload; 6038 struct lpfc_dmabuf *req_buf; 6039 int rc; 6040 u32 elsrspcmd, ulp_context; 6041 6042 /* Need the incoming PRLI payload to determine if the ACC is for an 6043 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6044 */ 6045 req_buf = oldiocb->cmd_dmabuf; 6046 req_payload = (((uint32_t *)req_buf->virt) + 1); 6047 6048 /* PRLI type payload is at byte 3 for FCP or NVME. 
*/ 6049 prli_fc4_req = be32_to_cpu(*req_payload); 6050 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6051 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6052 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6053 prli_fc4_req, *((uint32_t *)req_payload)); 6054 6055 if (prli_fc4_req == PRLI_FCP_TYPE) { 6056 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6057 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6058 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6059 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6060 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6061 } else { 6062 return 1; 6063 } 6064 6065 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6066 ndlp->nlp_DID, elsrspcmd); 6067 if (!elsiocb) 6068 return 1; 6069 6070 if (phba->sli_rev == LPFC_SLI_REV4) { 6071 wqe = &elsiocb->wqe; 6072 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6073 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6074 ulp_context = get_job_ulpcontext(phba, elsiocb); 6075 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6076 get_job_rcvoxid(phba, oldiocb)); 6077 } else { 6078 icmd = &elsiocb->iocb; 6079 oldcmd = &oldiocb->iocb; 6080 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6081 ulp_context = elsiocb->iocb.ulpContext; 6082 icmd->unsli3.rcvsli3.ox_id = 6083 oldcmd->unsli3.rcvsli3.ox_id; 6084 } 6085 6086 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6087 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6088 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6089 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", 6090 elsiocb->iotag, ulp_context, 6091 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6092 ndlp->nlp_rpi); 6093 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6094 memset(pcmd, 0, cmdsize); 6095 6096 *((uint32_t *)(pcmd)) = elsrspcmd; 6097 pcmd += sizeof(uint32_t); 6098 6099 /* For PRLI, remainder of payload is PRLI parameter page */ 6100 vpd = &phba->vpd; 6101 6102 if (prli_fc4_req == PRLI_FCP_TYPE) { 6103 /* 6104 * If the remote port is a target and our firmware version 6105 * is 3.20 or later, set the following bits for FC-TAPE 6106 * support. 6107 */ 6108 npr = (PRLI *) pcmd; 6109 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6110 (vpd->rev.feaLevelHigh >= 0x02)) { 6111 npr->ConfmComplAllowed = 1; 6112 npr->Retry = 1; 6113 npr->TaskRetryIdReq = 1; 6114 } 6115 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6116 6117 /* Set image pair for complementary pairs only. */ 6118 if (ndlp->nlp_type & NLP_FCP_TARGET) 6119 npr->estabImagePair = 1; 6120 else 6121 npr->estabImagePair = 0; 6122 npr->readXferRdyDis = 1; 6123 npr->ConfmComplAllowed = 1; 6124 npr->prliType = PRLI_FCP_TYPE; 6125 npr->initiatorFunc = 1; 6126 6127 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6128 lpfc_printf_vlog(vport, KERN_INFO, 6129 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 6130 "6014 FCP issue PRLI ACC imgpair %d " 6131 "retry %d task %d\n", 6132 npr->estabImagePair, 6133 npr->Retry, npr->TaskRetryIdReq); 6134 6135 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6136 /* Respond with an NVME PRLI Type */ 6137 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6138 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6139 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6140 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6141 if (phba->nvmet_support) { 6142 bf_set(prli_tgt, npr_nvme, 1); 6143 bf_set(prli_disc, npr_nvme, 1); 6144 if (phba->cfg_nvme_enable_fb) { 6145 bf_set(prli_fba, npr_nvme, 1); 6146 6147 /* TBD. 
Target mode needs to post buffers 6148 * that support the configured first burst 6149 * byte size. 6150 */ 6151 bf_set(prli_fb_sz, npr_nvme, 6152 phba->cfg_nvmet_fb_size); 6153 } 6154 } else { 6155 bf_set(prli_init, npr_nvme, 1); 6156 } 6157 6158 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6159 "6015 NVME issue PRLI ACC word1 x%08x " 6160 "word4 x%08x word5 x%08x flag x%lx, " 6161 "fcp_info x%x nlp_type x%x\n", 6162 npr_nvme->word1, npr_nvme->word4, 6163 npr_nvme->word5, ndlp->nlp_flag, 6164 ndlp->nlp_fcp_info, ndlp->nlp_type); 6165 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6166 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6167 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6168 } else 6169 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6170 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6171 prli_fc4_req, ndlp->nlp_fc4_type, 6172 ndlp->nlp_DID); 6173 6174 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6175 "Issue ACC PRLI: did:x%x flg:x%lx", 6176 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6177 6178 phba->fc_stat.elsXmitACC++; 6179 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6180 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6181 if (!elsiocb->ndlp) { 6182 lpfc_els_free_iocb(phba, elsiocb); 6183 return 1; 6184 } 6185 6186 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6187 if (rc == IOCB_ERROR) { 6188 lpfc_els_free_iocb(phba, elsiocb); 6189 lpfc_nlp_put(ndlp); 6190 return 1; 6191 } 6192 6193 return 0; 6194 } 6195 6196 /** 6197 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6198 * @vport: pointer to a virtual N_Port data structure. 6199 * @format: rnid command format. 6200 * @oldiocb: pointer to the original lpfc command iocb data structure. 6201 * @ndlp: pointer to a node-list data structure. 6202 * 6203 * This routine issues a Request Node Identification Data (RNID) Accept 6204 * (ACC) response. It constructs the RNID ACC response command according to 6205 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6206 * issue the response. 6207 * 6208 * Note that the ndlp reference count will be incremented by 1 for holding the 6209 * ndlp and the reference to ndlp will be stored into the ndlp field of 6210 * the IOCB for the completion callback function. 
6211 * 6212 * Return code 6213 * 0 - Successfully issued acc rnid response 6214 * 1 - Failed to issue acc rnid response 6215 **/ 6216 static int 6217 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6218 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6219 { 6220 struct lpfc_hba *phba = vport->phba; 6221 RNID *rn; 6222 IOCB_t *icmd, *oldcmd; 6223 union lpfc_wqe128 *wqe; 6224 struct lpfc_iocbq *elsiocb; 6225 uint8_t *pcmd; 6226 uint16_t cmdsize; 6227 int rc; 6228 u32 ulp_context; 6229 6230 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6231 + (2 * sizeof(struct lpfc_name)); 6232 if (format) 6233 cmdsize += sizeof(RNID_TOP_DISC); 6234 6235 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6236 ndlp->nlp_DID, ELS_CMD_ACC); 6237 if (!elsiocb) 6238 return 1; 6239 6240 if (phba->sli_rev == LPFC_SLI_REV4) { 6241 wqe = &elsiocb->wqe; 6242 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6243 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6244 ulp_context = get_job_ulpcontext(phba, elsiocb); 6245 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6246 get_job_rcvoxid(phba, oldiocb)); 6247 } else { 6248 icmd = &elsiocb->iocb; 6249 oldcmd = &oldiocb->iocb; 6250 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6251 ulp_context = elsiocb->iocb.ulpContext; 6252 icmd->unsli3.rcvsli3.ox_id = 6253 oldcmd->unsli3.rcvsli3.ox_id; 6254 } 6255 6256 /* Xmit RNID ACC response tag <ulpIoTag> */ 6257 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6258 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6259 elsiocb->iotag, ulp_context); 6260 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6261 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6262 pcmd += sizeof(uint32_t); 6263 6264 memset(pcmd, 0, sizeof(RNID)); 6265 rn = (RNID *) (pcmd); 6266 rn->Format = format; 6267 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6268 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6269 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6270 switch (format) { 6271 case 0: 6272 rn->SpecificLen = 0; 6273 break; 6274 case RNID_TOPOLOGY_DISC: 6275 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6276 memcpy(&rn->un.topologyDisc.portName, 6277 &vport->fc_portname, sizeof(struct lpfc_name)); 6278 rn->un.topologyDisc.unitType = RNID_HBA; 6279 rn->un.topologyDisc.physPort = 0; 6280 rn->un.topologyDisc.attachedNodes = 0; 6281 break; 6282 default: 6283 rn->CommonLen = 0; 6284 rn->SpecificLen = 0; 6285 break; 6286 } 6287 6288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6289 "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", 6290 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6291 6292 phba->fc_stat.elsXmitACC++; 6293 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6294 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6295 if (!elsiocb->ndlp) { 6296 lpfc_els_free_iocb(phba, elsiocb); 6297 return 1; 6298 } 6299 6300 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6301 if (rc == IOCB_ERROR) { 6302 lpfc_els_free_iocb(phba, elsiocb); 6303 lpfc_nlp_put(ndlp); 6304 return 1; 6305 } 6306 6307 return 0; 6308 } 6309 6310 /** 6311 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6312 * @vport: pointer to a virtual N_Port data structure. 6313 * @iocb: pointer to the lpfc command iocb data structure. 6314 * @ndlp: pointer to a node-list data structure. 
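 *
 * This routine processes an incoming Reinstate Recovery Qualifier (RRQ)
 * payload and clears the driver's active RRQ, if any, for the exchange
 * that the request describes.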
6315 * 6316 * Return 6317 **/ 6318 static void 6319 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6320 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6321 { 6322 struct lpfc_hba *phba = vport->phba; 6323 uint8_t *pcmd; 6324 struct RRQ *rrq; 6325 uint16_t rxid; 6326 uint16_t xri; 6327 struct lpfc_node_rrq *prrq; 6328 6329 6330 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6331 pcmd += sizeof(uint32_t); 6332 rrq = (struct RRQ *)pcmd; 6333 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6334 rxid = bf_get(rrq_rxid, rrq); 6335 6336 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6337 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6338 " x%x x%x\n", 6339 be32_to_cpu(bf_get(rrq_did, rrq)), 6340 bf_get(rrq_oxid, rrq), 6341 rxid, 6342 get_wqe_reqtag(iocb), 6343 get_job_ulpcontext(phba, iocb)); 6344 6345 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6346 "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", 6347 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6348 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6349 xri = bf_get(rrq_oxid, rrq); 6350 else 6351 xri = rxid; 6352 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6353 if (prrq) 6354 lpfc_clr_rrq_active(phba, xri, prrq); 6355 return; 6356 } 6357 6358 /** 6359 * lpfc_els_rsp_echo_acc - Issue echo acc response 6360 * @vport: pointer to a virtual N_Port data structure. 6361 * @data: pointer to echo data to return in the accept. 6362 * @oldiocb: pointer to the original lpfc command iocb data structure. 6363 * @ndlp: pointer to a node-list data structure. 6364 * 6365 * Return code 6366 * 0 - Successfully issued acc echo response 6367 * 1 - Failed to issue acc echo response 6368 **/ 6369 static int 6370 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6371 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6372 { 6373 struct lpfc_hba *phba = vport->phba; 6374 IOCB_t *icmd, *oldcmd; 6375 union lpfc_wqe128 *wqe; 6376 struct lpfc_iocbq *elsiocb; 6377 uint8_t *pcmd; 6378 uint16_t cmdsize; 6379 int rc; 6380 u32 ulp_context; 6381 6382 if (phba->sli_rev == LPFC_SLI_REV4) 6383 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6384 else 6385 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6386 6387 /* The accumulated length can exceed the BPL_SIZE. 
For
	 * now, use this as the limit
	 */
	if (cmdsize > LPFC_BPL_SIZE)
		cmdsize = LPFC_BPL_SIZE;
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);
	if (!elsiocb)
		return 1;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		wqe = &elsiocb->wqe;
		bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
		       get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
		ulp_context = get_job_ulpcontext(phba, elsiocb);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       get_job_rcvoxid(phba, oldiocb));
	} else {
		icmd = &elsiocb->iocb;
		oldcmd = &oldiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
		ulp_context = elsiocb->iocb.ulpContext;
		icmd->unsli3.rcvsli3.ox_id =
			oldcmd->unsli3.rcvsli3.ox_id;
	}

	/* Xmit ECHO ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
			 elsiocb->iotag, ulp_context);
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			      "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d",
			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));

	phba->fc_stat.elsXmitACC++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	return 0;
}

/**
 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Address Discover (ADISC) ELS commands to those
 * N_Ports which are in node port recovery state and for which ADISC has not
 * yet been issued on the @vport. Each time an ELS ADISC IOCB is issued by
 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
 * set in the @vport fc_flag and the remaining ADISC IOCBs are deferred to be
 * picked up later. On the other hand, if all the ndlps on the @vport have
 * been walked and no ADISC IOCB was issued, the FC_NLP_MORE bit is cleared
 * from the @vport fc_flag, indicating no more ADISCs need to be sent.
 *
 * Return code
 *   The number of N_Ports with adisc issued.
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentadisc = 0;

	/* go thru NPR nodes and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {

		if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
		    !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag))
			continue;

		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);

		if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
			/* This node was marked for ADISC but was not picked
			 * for discovery. This is possible if the node was
			 * missing in gidft response.
			 *
			 * At time of marking node for ADISC, we skipped unreg
			 * from backend
			 */
			lpfc_nlp_unreg_node(vport, ndlp);
			lpfc_unreg_rpi(vport, ndlp);
			continue;
		}

		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
		sentadisc++;
		vport->num_disc_nodes++;
		if (vport->num_disc_nodes >=
		    vport->cfg_discovery_threads) {
			set_bit(FC_NLP_MORE, &vport->fc_flag);
			break;
		}

	}
	if (sentadisc == 0)
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
	return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on a @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
 * the per-@vport discovery count (num_disc_nodes) is incremented. If
 * num_disc_nodes reaches the pre-configured threshold
 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport fc_flag
 * and the remaining PLOGI IOCBs are deferred to be picked up later. On the
 * other hand, if all the ndlps on the @vport have been walked and no PLOGI
 * IOCB was issued, the FC_NLP_MORE bit is cleared from the @vport fc_flag,
 * indicating no more PLOGIs need to be sent.
 *
 * Return code
 *   The number of N_Ports with plogi issued.
 **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentplogi = 0;

	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
		    !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) &&
		    !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			sentplogi++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				set_bit(FC_NLP_MORE, &vport->fc_flag);
				break;
			}
		}
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6452 Discover PLOGI %d flag x%lx\n",
			 sentplogi, vport->fc_flag);

	if (sentplogi)
		lpfc_set_disctmo(vport);
	else
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
	return sentplogi;
}

static uint32_t
lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
			  uint32_t word0)
{

	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
	desc->payload.els_req = word0;
	desc->length = cpu_to_be32(sizeof(desc->payload));

	return sizeof(struct fc_rdp_link_service_desc);
}

static uint32_t
lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
		      uint8_t *page_a0, uint8_t *page_a2)
{
	uint16_t wavelength;
	uint16_t temperature;
	uint16_t rx_power;
	uint16_t tx_bias;
	uint16_t tx_power;
	uint16_t vcc;
	uint16_t flag = 0;
	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
	struct
sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6588 6589 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6590 6591 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6592 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6593 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6594 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6595 6596 if ((trasn_code_byte4->fc_sw_laser) || 6597 (trasn_code_byte5->fc_sw_laser_sl) || 6598 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6599 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6600 } else if (trasn_code_byte4->fc_lw_laser) { 6601 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6602 page_a0[SSF_WAVELENGTH_B0]; 6603 if (wavelength == SFP_WAVELENGTH_LC1310) 6604 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6605 if (wavelength == SFP_WAVELENGTH_LL1550) 6606 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6607 } 6608 /* check if its SFP+ */ 6609 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6610 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6611 << SFP_FLAG_CT_SHIFT; 6612 6613 /* check if its OPTICAL */ 6614 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 6615 SFP_FLAG_IS_OPTICAL_PORT : 0) 6616 << SFP_FLAG_IS_OPTICAL_SHIFT; 6617 6618 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6619 page_a2[SFF_TEMPERATURE_B0]); 6620 vcc = (page_a2[SFF_VCC_B1] << 8 | 6621 page_a2[SFF_VCC_B0]); 6622 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6623 page_a2[SFF_TXPOWER_B0]); 6624 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6625 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6626 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6627 page_a2[SFF_RXPOWER_B0]); 6628 desc->sfp_info.temperature = cpu_to_be16(temperature); 6629 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6630 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6631 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6632 desc->sfp_info.vcc = cpu_to_be16(vcc); 6633 6634 desc->sfp_info.flags = cpu_to_be16(flag); 6635 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6636 6637 return sizeof(struct fc_rdp_sfp_desc); 6638 } 6639 6640 static uint32_t 6641 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6642 READ_LNK_VAR *stat) 6643 { 6644 uint32_t type; 6645 6646 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6647 6648 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6649 6650 desc->info.port_type = cpu_to_be32(type); 6651 6652 desc->info.link_status.link_failure_cnt = 6653 cpu_to_be32(stat->linkFailureCnt); 6654 desc->info.link_status.loss_of_synch_cnt = 6655 cpu_to_be32(stat->lossSyncCnt); 6656 desc->info.link_status.loss_of_signal_cnt = 6657 cpu_to_be32(stat->lossSignalCnt); 6658 desc->info.link_status.primitive_seq_proto_err = 6659 cpu_to_be32(stat->primSeqErrCnt); 6660 desc->info.link_status.invalid_trans_word = 6661 cpu_to_be32(stat->invalidXmitWord); 6662 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6663 6664 desc->length = cpu_to_be32(sizeof(desc->info)); 6665 6666 return sizeof(struct fc_rdp_link_error_status_desc); 6667 } 6668 6669 static uint32_t 6670 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6671 struct lpfc_vport *vport) 6672 { 6673 uint32_t bbCredit; 6674 6675 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6676 6677 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6678 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6679 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6680 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6681 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6682 
(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6683 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6684 } else { 6685 desc->bbc_info.attached_port_bbc = 0; 6686 } 6687 6688 desc->bbc_info.rtt = 0; 6689 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6690 6691 return sizeof(struct fc_rdp_bbc_desc); 6692 } 6693 6694 static uint32_t 6695 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6696 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6697 { 6698 uint32_t flags = 0; 6699 6700 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6701 6702 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6703 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6704 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6705 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6706 6707 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6708 flags |= RDP_OET_HIGH_ALARM; 6709 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6710 flags |= RDP_OET_LOW_ALARM; 6711 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6712 flags |= RDP_OET_HIGH_WARNING; 6713 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6714 flags |= RDP_OET_LOW_WARNING; 6715 6716 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6717 desc->oed_info.function_flags = cpu_to_be32(flags); 6718 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6719 return sizeof(struct fc_rdp_oed_sfp_desc); 6720 } 6721 6722 static uint32_t 6723 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6724 struct fc_rdp_oed_sfp_desc *desc, 6725 uint8_t *page_a2) 6726 { 6727 uint32_t flags = 0; 6728 6729 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6730 6731 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6732 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6733 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6734 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6735 6736 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6737 flags |= RDP_OET_HIGH_ALARM; 6738 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6739 flags |= RDP_OET_LOW_ALARM; 6740 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6741 flags |= RDP_OET_HIGH_WARNING; 6742 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6743 flags |= RDP_OET_LOW_WARNING; 6744 6745 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6746 desc->oed_info.function_flags = cpu_to_be32(flags); 6747 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6748 return sizeof(struct fc_rdp_oed_sfp_desc); 6749 } 6750 6751 static uint32_t 6752 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6753 struct fc_rdp_oed_sfp_desc *desc, 6754 uint8_t *page_a2) 6755 { 6756 uint32_t flags = 0; 6757 6758 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6759 6760 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6761 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6762 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6763 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6764 6765 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6766 flags |= RDP_OET_HIGH_ALARM; 6767 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6768 flags |= RDP_OET_LOW_ALARM; 6769 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6770 flags |= RDP_OET_HIGH_WARNING; 6771 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6772 flags |= RDP_OET_LOW_WARNING; 6773 6774 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6775 desc->oed_info.function_flags = cpu_to_be32(flags); 
6776 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6777 return sizeof(struct fc_rdp_oed_sfp_desc); 6778 } 6779 6780 static uint32_t 6781 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6782 struct fc_rdp_oed_sfp_desc *desc, 6783 uint8_t *page_a2) 6784 { 6785 uint32_t flags = 0; 6786 6787 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6788 6789 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6790 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6791 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6792 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6793 6794 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6795 flags |= RDP_OET_HIGH_ALARM; 6796 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6797 flags |= RDP_OET_LOW_ALARM; 6798 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6799 flags |= RDP_OET_HIGH_WARNING; 6800 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6801 flags |= RDP_OET_LOW_WARNING; 6802 6803 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6804 desc->oed_info.function_flags = cpu_to_be32(flags); 6805 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6806 return sizeof(struct fc_rdp_oed_sfp_desc); 6807 } 6808 6809 6810 static uint32_t 6811 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6812 struct fc_rdp_oed_sfp_desc *desc, 6813 uint8_t *page_a2) 6814 { 6815 uint32_t flags = 0; 6816 6817 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6818 6819 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6820 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6821 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6822 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6823 6824 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6825 flags |= RDP_OET_HIGH_ALARM; 6826 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6827 flags |= RDP_OET_LOW_ALARM; 6828 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6829 flags |= RDP_OET_HIGH_WARNING; 6830 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6831 flags |= RDP_OET_LOW_WARNING; 6832 6833 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6834 desc->oed_info.function_flags = cpu_to_be32(flags); 6835 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6836 return sizeof(struct fc_rdp_oed_sfp_desc); 6837 } 6838 6839 static uint32_t 6840 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6841 uint8_t *page_a0, struct lpfc_vport *vport) 6842 { 6843 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6844 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6845 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6846 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6847 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6848 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6849 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6850 return sizeof(struct fc_rdp_opd_sfp_desc); 6851 } 6852 6853 static uint32_t 6854 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6855 { 6856 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6857 return 0; 6858 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6859 6860 desc->info.CorrectedBlocks = 6861 cpu_to_be32(stat->fecCorrBlkCount); 6862 desc->info.UncorrectableBlocks = 6863 cpu_to_be32(stat->fecUncorrBlkCount); 6864 6865 desc->length = cpu_to_be32(sizeof(desc->info)); 6866 6867 return sizeof(struct fc_fec_rdp_desc); 6868 } 6869 6870 static uint32_t 6871 
lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6872 { 6873 uint16_t rdp_cap = 0; 6874 uint16_t rdp_speed; 6875 6876 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6877 6878 switch (phba->fc_linkspeed) { 6879 case LPFC_LINK_SPEED_1GHZ: 6880 rdp_speed = RDP_PS_1GB; 6881 break; 6882 case LPFC_LINK_SPEED_2GHZ: 6883 rdp_speed = RDP_PS_2GB; 6884 break; 6885 case LPFC_LINK_SPEED_4GHZ: 6886 rdp_speed = RDP_PS_4GB; 6887 break; 6888 case LPFC_LINK_SPEED_8GHZ: 6889 rdp_speed = RDP_PS_8GB; 6890 break; 6891 case LPFC_LINK_SPEED_10GHZ: 6892 rdp_speed = RDP_PS_10GB; 6893 break; 6894 case LPFC_LINK_SPEED_16GHZ: 6895 rdp_speed = RDP_PS_16GB; 6896 break; 6897 case LPFC_LINK_SPEED_32GHZ: 6898 rdp_speed = RDP_PS_32GB; 6899 break; 6900 case LPFC_LINK_SPEED_64GHZ: 6901 rdp_speed = RDP_PS_64GB; 6902 break; 6903 case LPFC_LINK_SPEED_128GHZ: 6904 rdp_speed = RDP_PS_128GB; 6905 break; 6906 case LPFC_LINK_SPEED_256GHZ: 6907 rdp_speed = RDP_PS_256GB; 6908 break; 6909 default: 6910 rdp_speed = RDP_PS_UNKNOWN; 6911 break; 6912 } 6913 6914 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6915 6916 if (phba->lmt & LMT_256Gb) 6917 rdp_cap |= RDP_PS_256GB; 6918 if (phba->lmt & LMT_128Gb) 6919 rdp_cap |= RDP_PS_128GB; 6920 if (phba->lmt & LMT_64Gb) 6921 rdp_cap |= RDP_PS_64GB; 6922 if (phba->lmt & LMT_32Gb) 6923 rdp_cap |= RDP_PS_32GB; 6924 if (phba->lmt & LMT_16Gb) 6925 rdp_cap |= RDP_PS_16GB; 6926 if (phba->lmt & LMT_10Gb) 6927 rdp_cap |= RDP_PS_10GB; 6928 if (phba->lmt & LMT_8Gb) 6929 rdp_cap |= RDP_PS_8GB; 6930 if (phba->lmt & LMT_4Gb) 6931 rdp_cap |= RDP_PS_4GB; 6932 if (phba->lmt & LMT_2Gb) 6933 rdp_cap |= RDP_PS_2GB; 6934 if (phba->lmt & LMT_1Gb) 6935 rdp_cap |= RDP_PS_1GB; 6936 6937 if (rdp_cap == 0) 6938 rdp_cap = RDP_CAP_UNKNOWN; 6939 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6940 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6941 6942 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6943 desc->length = cpu_to_be32(sizeof(desc->info)); 6944 return sizeof(struct fc_rdp_port_speed_desc); 6945 } 6946 6947 static uint32_t 6948 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6949 struct lpfc_vport *vport) 6950 { 6951 6952 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6953 6954 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6955 sizeof(desc->port_names.wwnn)); 6956 6957 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6958 sizeof(desc->port_names.wwpn)); 6959 6960 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6961 return sizeof(struct fc_rdp_port_name_desc); 6962 } 6963 6964 static uint32_t 6965 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6966 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6967 { 6968 6969 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6970 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 6971 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6972 sizeof(desc->port_names.wwnn)); 6973 6974 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6975 sizeof(desc->port_names.wwpn)); 6976 } else { /* Point to Point */ 6977 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6978 sizeof(desc->port_names.wwnn)); 6979 6980 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6981 sizeof(desc->port_names.wwpn)); 6982 } 6983 6984 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6985 return sizeof(struct fc_rdp_port_name_desc); 6986 } 6987 6988 static void 6989 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6990 int status) 6991 { 6992 struct lpfc_nodelist *ndlp 
= rdp_context->ndlp; 6993 struct lpfc_vport *vport = ndlp->vport; 6994 struct lpfc_iocbq *elsiocb; 6995 struct ulp_bde64 *bpl; 6996 IOCB_t *icmd; 6997 union lpfc_wqe128 *wqe; 6998 uint8_t *pcmd; 6999 struct ls_rjt *stat; 7000 struct fc_rdp_res_frame *rdp_res; 7001 uint32_t cmdsize, len; 7002 uint16_t *flag_ptr; 7003 int rc; 7004 u32 ulp_context; 7005 7006 if (status != SUCCESS) 7007 goto error; 7008 7009 /* This will change once we know the true size of the RDP payload */ 7010 cmdsize = sizeof(struct fc_rdp_res_frame); 7011 7012 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7013 lpfc_max_els_tries, rdp_context->ndlp, 7014 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7015 if (!elsiocb) 7016 goto free_rdp_context; 7017 7018 ulp_context = get_job_ulpcontext(phba, elsiocb); 7019 if (phba->sli_rev == LPFC_SLI_REV4) { 7020 wqe = &elsiocb->wqe; 7021 /* ox-id of the frame */ 7022 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7023 rdp_context->ox_id); 7024 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7025 rdp_context->rx_id); 7026 } else { 7027 icmd = &elsiocb->iocb; 7028 icmd->ulpContext = rdp_context->rx_id; 7029 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7030 } 7031 7032 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7033 "2171 Xmit RDP response tag x%x xri x%x, " 7034 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", 7035 elsiocb->iotag, ulp_context, 7036 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7037 ndlp->nlp_rpi); 7038 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7039 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7040 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7041 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7042 7043 /* Update Alarm and Warning */ 7044 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7045 phba->sfp_alarm |= *flag_ptr; 7046 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7047 phba->sfp_warning |= *flag_ptr; 7048 7049 /* For RDP payload */ 7050 len = 8; 7051 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7052 (len + pcmd), ELS_CMD_RDP); 7053 7054 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7055 rdp_context->page_a0, rdp_context->page_a2); 7056 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7057 phba); 7058 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7059 (len + pcmd), &rdp_context->link_stat); 7060 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7061 (len + pcmd), vport); 7062 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7063 (len + pcmd), vport, ndlp); 7064 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7065 &rdp_context->link_stat); 7066 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7067 &rdp_context->link_stat, vport); 7068 len += lpfc_rdp_res_oed_temp_desc(phba, 7069 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7070 rdp_context->page_a2); 7071 len += lpfc_rdp_res_oed_voltage_desc(phba, 7072 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7073 rdp_context->page_a2); 7074 len += lpfc_rdp_res_oed_txbias_desc(phba, 7075 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7076 rdp_context->page_a2); 7077 len += lpfc_rdp_res_oed_txpower_desc(phba, 7078 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7079 rdp_context->page_a2); 7080 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7081 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7082 rdp_context->page_a2); 7083 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7084 
rdp_context->page_a0, vport); 7085 7086 rdp_res->length = cpu_to_be32(len - 8); 7087 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7088 7089 /* Now that we know the true size of the payload, update the BPL */ 7090 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7091 bpl->tus.f.bdeSize = len; 7092 bpl->tus.f.bdeFlags = 0; 7093 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7094 7095 phba->fc_stat.elsXmitACC++; 7096 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7097 if (!elsiocb->ndlp) { 7098 lpfc_els_free_iocb(phba, elsiocb); 7099 goto free_rdp_context; 7100 } 7101 7102 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7103 if (rc == IOCB_ERROR) { 7104 lpfc_els_free_iocb(phba, elsiocb); 7105 lpfc_nlp_put(ndlp); 7106 } 7107 7108 goto free_rdp_context; 7109 7110 error: 7111 cmdsize = 2 * sizeof(uint32_t); 7112 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7113 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7114 if (!elsiocb) 7115 goto free_rdp_context; 7116 7117 if (phba->sli_rev == LPFC_SLI_REV4) { 7118 wqe = &elsiocb->wqe; 7119 /* ox-id of the frame */ 7120 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7121 rdp_context->ox_id); 7122 bf_set(wqe_ctxt_tag, 7123 &wqe->xmit_els_rsp.wqe_com, 7124 rdp_context->rx_id); 7125 } else { 7126 icmd = &elsiocb->iocb; 7127 icmd->ulpContext = rdp_context->rx_id; 7128 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7129 } 7130 7131 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7132 7133 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7134 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7135 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7136 7137 phba->fc_stat.elsXmitLSRJT++; 7138 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7139 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7140 if (!elsiocb->ndlp) { 7141 lpfc_els_free_iocb(phba, elsiocb); 7142 goto free_rdp_context; 7143 } 7144 7145 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7146 if (rc == IOCB_ERROR) { 7147 lpfc_els_free_iocb(phba, elsiocb); 7148 lpfc_nlp_put(ndlp); 7149 } 7150 7151 free_rdp_context: 7152 /* This reference put is for the original unsolicited RDP. If the 7153 * prep failed, there is no reference to remove. 
7154 */ 7155 lpfc_nlp_put(ndlp); 7156 kfree(rdp_context); 7157 } 7158 7159 static int 7160 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7161 { 7162 LPFC_MBOXQ_t *mbox = NULL; 7163 int rc; 7164 7165 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7166 if (!mbox) { 7167 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7168 "7105 failed to allocate mailbox memory"); 7169 return 1; 7170 } 7171 7172 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7173 goto rdp_fail; 7174 mbox->vport = rdp_context->ndlp->vport; 7175 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7176 mbox->ctx_u.rdp = rdp_context; 7177 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7178 if (rc == MBX_NOT_FINISHED) { 7179 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7180 return 1; 7181 } 7182 7183 return 0; 7184 7185 rdp_fail: 7186 mempool_free(mbox, phba->mbox_mem_pool); 7187 return 1; 7188 } 7189 7190 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7191 struct lpfc_rdp_context *rdp_context) 7192 { 7193 LPFC_MBOXQ_t *mbox = NULL; 7194 int rc; 7195 struct lpfc_dmabuf *mp; 7196 struct lpfc_dmabuf *mpsave; 7197 void *virt; 7198 MAILBOX_t *mb; 7199 7200 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7201 if (!mbox) { 7202 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7203 "7205 failed to allocate mailbox memory"); 7204 return 1; 7205 } 7206 7207 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7208 goto sfp_fail; 7209 mp = mbox->ctx_buf; 7210 mpsave = mp; 7211 virt = mp->virt; 7212 if (phba->sli_rev < LPFC_SLI_REV4) { 7213 mb = &mbox->u.mb; 7214 mb->un.varDmp.cv = 1; 7215 mb->un.varDmp.co = 1; 7216 mb->un.varWords[2] = 0; 7217 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7218 mb->un.varWords[4] = 0; 7219 mb->un.varWords[5] = 0; 7220 mb->un.varWords[6] = 0; 7221 mb->un.varWords[7] = 0; 7222 mb->un.varWords[8] = 0; 7223 mb->un.varWords[9] = 0; 7224 mb->un.varWords[10] = 0; 7225 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7226 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7227 mbox->mbox_offset_word = 5; 7228 mbox->ext_buf = virt; 7229 } else { 7230 bf_set(lpfc_mbx_memory_dump_type3_length, 7231 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7232 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7233 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7234 } 7235 mbox->vport = phba->pport; 7236 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); 7237 if (rc == MBX_NOT_FINISHED) { 7238 rc = 1; 7239 goto error; 7240 } 7241 if (rc == MBX_TIMEOUT) 7242 goto error; 7243 if (phba->sli_rev == LPFC_SLI_REV4) 7244 mp = mbox->ctx_buf; 7245 else 7246 mp = mpsave; 7247 7248 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7249 rc = 1; 7250 goto error; 7251 } 7252 7253 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7254 DMP_SFF_PAGE_A0_SIZE); 7255 7256 memset(mbox, 0, sizeof(*mbox)); 7257 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7258 INIT_LIST_HEAD(&mp->list); 7259 7260 /* save address for completion */ 7261 mbox->ctx_buf = mp; 7262 mbox->vport = phba->pport; 7263 7264 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7265 bf_set(lpfc_mbx_memory_dump_type3_type, 7266 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7267 bf_set(lpfc_mbx_memory_dump_type3_link, 7268 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7269 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7270 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7271 if (phba->sli_rev < LPFC_SLI_REV4) { 7272 mb = &mbox->u.mb; 7273 mb->un.varDmp.cv = 1; 7274 
mb->un.varDmp.co = 1; 7275 mb->un.varWords[2] = 0; 7276 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; 7277 mb->un.varWords[4] = 0; 7278 mb->un.varWords[5] = 0; 7279 mb->un.varWords[6] = 0; 7280 mb->un.varWords[7] = 0; 7281 mb->un.varWords[8] = 0; 7282 mb->un.varWords[9] = 0; 7283 mb->un.varWords[10] = 0; 7284 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7285 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7286 mbox->mbox_offset_word = 5; 7287 mbox->ext_buf = virt; 7288 } else { 7289 bf_set(lpfc_mbx_memory_dump_type3_length, 7290 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); 7291 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7292 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7293 } 7294 7295 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); 7296 7297 if (rc == MBX_TIMEOUT) 7298 goto error; 7299 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7300 rc = 1; 7301 goto error; 7302 } 7303 rc = 0; 7304 7305 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 7306 DMP_SFF_PAGE_A2_SIZE); 7307 7308 error: 7309 if (mbox->mbox_flag & LPFC_MBX_WAKE) { 7310 mbox->ctx_buf = mpsave; 7311 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7312 } 7313 7314 return rc; 7315 7316 sfp_fail: 7317 mempool_free(mbox, phba->mbox_mem_pool); 7318 return 1; 7319 } 7320 7321 /* 7322 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7323 * @vport: pointer to a host virtual N_Port data structure. 7324 * @cmdiocb: pointer to lpfc command iocb data structure. 7325 * @ndlp: pointer to a node-list data structure. 7326 * 7327 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7328 * IOCB. First, the payload of the unsolicited RDP is checked. 7329 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7330 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7331 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7332 * gather all data and send RDP response. 7333 * 7334 * Return code 7335 * 0 - Sent the acc response 7336 * 1 - Sent the reject response. 
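 *
 * Note: on success the ACC payload is assembled by lpfc_els_rdp_cmpl() as a
 * sequence of descriptors (link service, SFP, port speed, link error status,
 * local and attached port names, FEC, BBC, the five OED descriptors and OPD).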
7337 */ 7338 static int 7339 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7340 struct lpfc_nodelist *ndlp) 7341 { 7342 struct lpfc_hba *phba = vport->phba; 7343 struct lpfc_dmabuf *pcmd; 7344 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7345 struct fc_rdp_req_frame *rdp_req; 7346 struct lpfc_rdp_context *rdp_context; 7347 union lpfc_wqe128 *cmd = NULL; 7348 struct ls_rjt stat; 7349 7350 if (phba->sli_rev < LPFC_SLI_REV4 || 7351 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7352 LPFC_SLI_INTF_IF_TYPE_2) { 7353 rjt_err = LSRJT_UNABLE_TPC; 7354 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7355 goto error; 7356 } 7357 7358 if (phba->sli_rev < LPFC_SLI_REV4 || 7359 test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 7360 rjt_err = LSRJT_UNABLE_TPC; 7361 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7362 goto error; 7363 } 7364 7365 pcmd = cmdiocb->cmd_dmabuf; 7366 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7367 7368 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7369 "2422 ELS RDP Request " 7370 "dec len %d tag x%x port_id %d len %d\n", 7371 be32_to_cpu(rdp_req->rdp_des_length), 7372 be32_to_cpu(rdp_req->nport_id_desc.tag), 7373 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7374 be32_to_cpu(rdp_req->nport_id_desc.length)); 7375 7376 if (sizeof(struct fc_rdp_nport_desc) != 7377 be32_to_cpu(rdp_req->rdp_des_length)) 7378 goto rjt_logerr; 7379 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7380 goto rjt_logerr; 7381 if (RDP_NPORT_ID_SIZE != 7382 be32_to_cpu(rdp_req->nport_id_desc.length)) 7383 goto rjt_logerr; 7384 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7385 if (!rdp_context) { 7386 rjt_err = LSRJT_UNABLE_TPC; 7387 goto error; 7388 } 7389 7390 cmd = &cmdiocb->wqe; 7391 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7392 if (!rdp_context->ndlp) { 7393 kfree(rdp_context); 7394 rjt_err = LSRJT_UNABLE_TPC; 7395 goto error; 7396 } 7397 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7398 &cmd->xmit_els_rsp.wqe_com); 7399 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7400 &cmd->xmit_els_rsp.wqe_com); 7401 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7402 if (lpfc_get_rdp_info(phba, rdp_context)) { 7403 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7404 "2423 Unable to send mailbox"); 7405 kfree(rdp_context); 7406 rjt_err = LSRJT_UNABLE_TPC; 7407 lpfc_nlp_put(ndlp); 7408 goto error; 7409 } 7410 7411 return 0; 7412 7413 rjt_logerr: 7414 rjt_err = LSRJT_LOGICAL_ERR; 7415 7416 error: 7417 memset(&stat, 0, sizeof(stat)); 7418 stat.un.b.lsRjtRsnCode = rjt_err; 7419 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7420 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7421 return 1; 7422 } 7423 7424 7425 static void 7426 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7427 { 7428 MAILBOX_t *mb; 7429 IOCB_t *icmd; 7430 union lpfc_wqe128 *wqe; 7431 uint8_t *pcmd; 7432 struct lpfc_iocbq *elsiocb; 7433 struct lpfc_nodelist *ndlp; 7434 struct ls_rjt *stat; 7435 union lpfc_sli4_cfg_shdr *shdr; 7436 struct lpfc_lcb_context *lcb_context; 7437 struct fc_lcb_res_frame *lcb_res; 7438 uint32_t cmdsize, shdr_status, shdr_add_status; 7439 int rc; 7440 7441 mb = &pmb->u.mb; 7442 lcb_context = pmb->ctx_u.lcb; 7443 ndlp = lcb_context->ndlp; 7444 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 7445 pmb->ctx_buf = NULL; 7446 7447 shdr = (union lpfc_sli4_cfg_shdr *) 7448 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7449 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7450 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7451 7452 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7453 "0194 SET_BEACON_CONFIG mailbox " 7454 "completed with status x%x add_status x%x," 7455 " mbx status x%x\n", 7456 shdr_status, shdr_add_status, mb->mbxStatus); 7457 7458 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7459 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7460 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7461 mempool_free(pmb, phba->mbox_mem_pool); 7462 goto error; 7463 } 7464 7465 mempool_free(pmb, phba->mbox_mem_pool); 7466 cmdsize = sizeof(struct fc_lcb_res_frame); 7467 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7468 lpfc_max_els_tries, ndlp, 7469 ndlp->nlp_DID, ELS_CMD_ACC); 7470 7471 /* Decrement the ndlp reference count from previous mbox command */ 7472 lpfc_nlp_put(ndlp); 7473 7474 if (!elsiocb) 7475 goto free_lcb_context; 7476 7477 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7478 7479 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7480 7481 if (phba->sli_rev == LPFC_SLI_REV4) { 7482 wqe = &elsiocb->wqe; 7483 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7484 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7485 lcb_context->ox_id); 7486 } else { 7487 icmd = &elsiocb->iocb; 7488 icmd->ulpContext = lcb_context->rx_id; 7489 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7490 } 7491 7492 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7493 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7494 lcb_res->lcb_sub_command = lcb_context->sub_command; 7495 lcb_res->lcb_type = lcb_context->type; 7496 lcb_res->capability = lcb_context->capability; 7497 lcb_res->lcb_frequency = lcb_context->frequency; 7498 lcb_res->lcb_duration = lcb_context->duration; 7499 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7500 phba->fc_stat.elsXmitACC++; 7501 7502 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7503 if (!elsiocb->ndlp) { 7504 lpfc_els_free_iocb(phba, elsiocb); 7505 goto out; 7506 } 7507 7508 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7509 if (rc == IOCB_ERROR) { 7510 lpfc_els_free_iocb(phba, elsiocb); 7511 lpfc_nlp_put(ndlp); 7512 } 7513 out: 7514 kfree(lcb_context); 7515 return; 7516 7517 error: 7518 cmdsize = sizeof(struct fc_lcb_res_frame); 7519 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7520 lpfc_max_els_tries, ndlp, 7521 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7522 lpfc_nlp_put(ndlp); 7523 if (!elsiocb) 7524 goto free_lcb_context; 7525 7526 if (phba->sli_rev == LPFC_SLI_REV4) { 7527 wqe = &elsiocb->wqe; 7528 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7529 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7530 lcb_context->ox_id); 7531 } else { 7532 icmd = &elsiocb->iocb; 7533 icmd->ulpContext = lcb_context->rx_id; 7534 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7535 } 7536 7537 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7538 7539 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7540 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7541 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7542 7543 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7544 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7545 7546 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7547 phba->fc_stat.elsXmitLSRJT++; 7548 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7549 if (!elsiocb->ndlp) { 7550 lpfc_els_free_iocb(phba, elsiocb); 7551 goto free_lcb_context; 7552 } 7553 7554 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7555 if (rc == IOCB_ERROR) { 7556 lpfc_els_free_iocb(phba, elsiocb); 7557 lpfc_nlp_put(ndlp); 7558 } 7559 free_lcb_context: 7560 kfree(lcb_context); 7561 } 
7562 7563 static int 7564 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7565 struct lpfc_lcb_context *lcb_context, 7566 uint32_t beacon_state) 7567 { 7568 struct lpfc_hba *phba = vport->phba; 7569 union lpfc_sli4_cfg_shdr *cfg_shdr; 7570 LPFC_MBOXQ_t *mbox = NULL; 7571 uint32_t len; 7572 int rc; 7573 7574 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7575 if (!mbox) 7576 return 1; 7577 7578 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7579 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7580 sizeof(struct lpfc_sli4_cfg_mhdr); 7581 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7582 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7583 LPFC_SLI4_MBX_EMBED); 7584 mbox->ctx_u.lcb = lcb_context; 7585 mbox->vport = phba->pport; 7586 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7587 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7588 phba->sli4_hba.physical_port); 7589 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7590 beacon_state); 7591 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7592 7593 /* 7594 * Check bv1s bit before issuing the mailbox 7595 * if bv1s == 1, LCB V1 supported 7596 * else, LCB V0 supported 7597 */ 7598 7599 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7600 /* COMMON_SET_BEACON_CONFIG_V1 */ 7601 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7602 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7603 bf_set(lpfc_mbx_set_beacon_port_type, 7604 &mbox->u.mqe.un.beacon_config, 0); 7605 bf_set(lpfc_mbx_set_beacon_duration_v1, 7606 &mbox->u.mqe.un.beacon_config, 7607 be16_to_cpu(lcb_context->duration)); 7608 } else { 7609 /* COMMON_SET_BEACON_CONFIG_V0 */ 7610 if (be16_to_cpu(lcb_context->duration) != 0) { 7611 mempool_free(mbox, phba->mbox_mem_pool); 7612 return 1; 7613 } 7614 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7615 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7616 bf_set(lpfc_mbx_set_beacon_state, 7617 &mbox->u.mqe.un.beacon_config, beacon_state); 7618 bf_set(lpfc_mbx_set_beacon_port_type, 7619 &mbox->u.mqe.un.beacon_config, 1); 7620 bf_set(lpfc_mbx_set_beacon_duration, 7621 &mbox->u.mqe.un.beacon_config, 7622 be16_to_cpu(lcb_context->duration)); 7623 } 7624 7625 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7626 if (rc == MBX_NOT_FINISHED) { 7627 mempool_free(mbox, phba->mbox_mem_pool); 7628 return 1; 7629 } 7630 7631 return 0; 7632 } 7633 7634 7635 /** 7636 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7637 * @vport: pointer to a host virtual N_Port data structure. 7638 * @cmdiocb: pointer to lpfc command iocb data structure. 7639 * @ndlp: pointer to a node-list data structure. 7640 * 7641 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7642 * First, the payload of the unsolicited LCB is checked. 7643 * Then based on Subcommand beacon will either turn on or off. 7644 * 7645 * Return code 7646 * 0 - Sent the acc response 7647 * 1 - Sent the reject response. 
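 *
 * Note: lpfc_sli4_set_beacon() above selects the SET_BEACON_CONFIG mailbox
 * version from pc_sli4_params.bv1s: the V1 form honors the requested beacon
 * duration, while the V0 form only accepts a zero duration.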
7648 **/ 7649 static int 7650 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7651 struct lpfc_nodelist *ndlp) 7652 { 7653 struct lpfc_hba *phba = vport->phba; 7654 struct lpfc_dmabuf *pcmd; 7655 uint8_t *lp; 7656 struct fc_lcb_request_frame *beacon; 7657 struct lpfc_lcb_context *lcb_context; 7658 u8 state, rjt_err = 0; 7659 struct ls_rjt stat; 7660 7661 pcmd = cmdiocb->cmd_dmabuf; 7662 lp = (uint8_t *)pcmd->virt; 7663 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7664 7665 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7666 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7667 "type x%x frequency %x duration x%x\n", 7668 lp[0], lp[1], lp[2], 7669 beacon->lcb_command, 7670 beacon->lcb_sub_command, 7671 beacon->lcb_type, 7672 beacon->lcb_frequency, 7673 be16_to_cpu(beacon->lcb_duration)); 7674 7675 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7676 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7677 rjt_err = LSRJT_CMD_UNSUPPORTED; 7678 goto rjt; 7679 } 7680 7681 if (phba->sli_rev < LPFC_SLI_REV4 || 7682 test_bit(HBA_FCOE_MODE, &phba->hba_flag) || 7683 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7684 LPFC_SLI_INTF_IF_TYPE_2)) { 7685 rjt_err = LSRJT_CMD_UNSUPPORTED; 7686 goto rjt; 7687 } 7688 7689 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7690 if (!lcb_context) { 7691 rjt_err = LSRJT_UNABLE_TPC; 7692 goto rjt; 7693 } 7694 7695 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7696 lcb_context->sub_command = beacon->lcb_sub_command; 7697 lcb_context->capability = 0; 7698 lcb_context->type = beacon->lcb_type; 7699 lcb_context->frequency = beacon->lcb_frequency; 7700 lcb_context->duration = beacon->lcb_duration; 7701 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7702 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7703 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7704 if (!lcb_context->ndlp) { 7705 rjt_err = LSRJT_UNABLE_TPC; 7706 goto rjt_free; 7707 } 7708 7709 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7710 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7711 "0193 failed to send mail box"); 7712 lpfc_nlp_put(ndlp); 7713 rjt_err = LSRJT_UNABLE_TPC; 7714 goto rjt_free; 7715 } 7716 return 0; 7717 7718 rjt_free: 7719 kfree(lcb_context); 7720 rjt: 7721 memset(&stat, 0, sizeof(stat)); 7722 stat.un.b.lsRjtRsnCode = rjt_err; 7723 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7724 return 1; 7725 } 7726 7727 7728 /** 7729 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7730 * @vport: pointer to a host virtual N_Port data structure. 7731 * 7732 * This routine cleans up any Registration State Change Notification 7733 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7734 * @vport together with the host_lock is used to prevent multiple thread 7735 * trying to access the RSCN array on a same @vport at the same time. 
7736 **/ 7737 void 7738 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7739 { 7740 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7741 struct lpfc_hba *phba = vport->phba; 7742 int i; 7743 7744 spin_lock_irq(shost->host_lock); 7745 if (vport->fc_rscn_flush) { 7746 /* Another thread is walking fc_rscn_id_list on this vport */ 7747 spin_unlock_irq(shost->host_lock); 7748 return; 7749 } 7750 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7751 vport->fc_rscn_flush = 1; 7752 spin_unlock_irq(shost->host_lock); 7753 7754 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7755 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7756 vport->fc_rscn_id_list[i] = NULL; 7757 } 7758 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 7759 clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 7760 spin_lock_irq(shost->host_lock); 7761 vport->fc_rscn_id_cnt = 0; 7762 spin_unlock_irq(shost->host_lock); 7763 lpfc_can_disctmo(vport); 7764 /* Indicate we are done walking this fc_rscn_id_list */ 7765 vport->fc_rscn_flush = 0; 7766 } 7767 7768 /** 7769 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7770 * @vport: pointer to a host virtual N_Port data structure. 7771 * @did: remote destination port identifier. 7772 * 7773 * This routine checks whether there is any pending Registration State 7774 * Change Notification (RSCN) to a @did on @vport. 7775 * 7776 * Return code 7777 * Non-zero - The @did matched with a pending rscn 7778 * 0 - not able to match @did with a pending rscn 7779 **/ 7780 int 7781 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7782 { 7783 D_ID ns_did; 7784 D_ID rscn_did; 7785 uint32_t *lp; 7786 uint32_t payload_len, i; 7787 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7788 7789 ns_did.un.word = did; 7790 7791 /* Never match fabric nodes for RSCNs */ 7792 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7793 return 0; 7794 7795 /* If we are doing a FULL RSCN rediscovery, match everything */ 7796 if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 7797 return did; 7798 7799 spin_lock_irq(shost->host_lock); 7800 if (vport->fc_rscn_flush) { 7801 /* Another thread is walking fc_rscn_id_list on this vport */ 7802 spin_unlock_irq(shost->host_lock); 7803 return 0; 7804 } 7805 /* Indicate we are walking fc_rscn_id_list on this vport */ 7806 vport->fc_rscn_flush = 1; 7807 spin_unlock_irq(shost->host_lock); 7808 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7809 lp = vport->fc_rscn_id_list[i]->virt; 7810 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7811 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7812 while (payload_len) { 7813 rscn_did.un.word = be32_to_cpu(*lp++); 7814 payload_len -= sizeof(uint32_t); 7815 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7816 case RSCN_ADDRESS_FORMAT_PORT: 7817 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7818 && (ns_did.un.b.area == rscn_did.un.b.area) 7819 && (ns_did.un.b.id == rscn_did.un.b.id)) 7820 goto return_did_out; 7821 break; 7822 case RSCN_ADDRESS_FORMAT_AREA: 7823 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7824 && (ns_did.un.b.area == rscn_did.un.b.area)) 7825 goto return_did_out; 7826 break; 7827 case RSCN_ADDRESS_FORMAT_DOMAIN: 7828 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7829 goto return_did_out; 7830 break; 7831 case RSCN_ADDRESS_FORMAT_FABRIC: 7832 goto return_did_out; 7833 } 7834 } 7835 } 7836 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7837 vport->fc_rscn_flush = 0; 7838 return 0; 7839 return_did_out: 7840 /* Indicate we
are done with walking fc_rscn_id_list on this vport */ 7841 vport->fc_rscn_flush = 0; 7842 return did; 7843 } 7844 7845 /** 7846 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7847 * @vport: pointer to a host virtual N_Port data structure. 7848 * 7849 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7850 * state machine for a @vport's nodes that have a pending RSCN (Registration 7851 * State Change Notification). 7852 * 7853 * Return code 7854 * 0 - Successful (currently always returns 0) 7855 **/ 7856 static int 7857 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7858 { 7859 struct lpfc_nodelist *ndlp = NULL, *n; 7860 7861 /* Move all nodes affected by pending RSCNs to NPR state. */ 7862 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7863 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7864 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7865 continue; 7866 7867 /* NVME Target mode does not do RSCN Recovery. */ 7868 if (vport->phba->nvmet_support) 7869 continue; 7870 7871 /* If we are in the process of doing discovery on this 7872 * NPort, let it continue on its own. 7873 */ 7874 switch (ndlp->nlp_state) { 7875 case NLP_STE_PLOGI_ISSUE: 7876 case NLP_STE_ADISC_ISSUE: 7877 case NLP_STE_REG_LOGIN_ISSUE: 7878 case NLP_STE_PRLI_ISSUE: 7879 case NLP_STE_LOGO_ISSUE: 7880 continue; 7881 } 7882 7883 lpfc_disc_state_machine(vport, ndlp, NULL, 7884 NLP_EVT_DEVICE_RECOVERY); 7885 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7886 } 7887 return 0; 7888 } 7889 7890 /** 7891 * lpfc_send_rscn_event - Send an RSCN event to management application 7892 * @vport: pointer to a host virtual N_Port data structure. 7893 * @cmdiocb: pointer to lpfc command iocb data structure. 7894 * 7895 * lpfc_send_rscn_event sends an RSCN netlink event to management 7896 * applications. 7897 */ 7898 static void 7899 lpfc_send_rscn_event(struct lpfc_vport *vport, 7900 struct lpfc_iocbq *cmdiocb) 7901 { 7902 struct lpfc_dmabuf *pcmd; 7903 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7904 uint32_t *payload_ptr; 7905 uint32_t payload_len; 7906 struct lpfc_rscn_event_header *rscn_event_data; 7907 7908 pcmd = cmdiocb->cmd_dmabuf; 7909 payload_ptr = (uint32_t *) pcmd->virt; 7910 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7911 7912 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7913 payload_len, GFP_KERNEL); 7914 if (!rscn_event_data) { 7915 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7916 "0147 Failed to allocate memory for RSCN event\n"); 7917 return; 7918 } 7919 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7920 rscn_event_data->payload_length = payload_len; 7921 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7922 payload_len); 7923 7924 fc_host_post_vendor_event(shost, 7925 fc_get_event_number(), 7926 sizeof(struct lpfc_rscn_event_header) + payload_len, 7927 (char *)rscn_event_data, 7928 LPFC_NL_VENDOR_ID); 7929 7930 kfree(rscn_event_data); 7931 } 7932 7933 /** 7934 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7935 * @vport: pointer to a host virtual N_Port data structure. 7936 * @cmdiocb: pointer to lpfc command iocb data structure. 7937 * @ndlp: pointer to a node-list data structure. 7938 * 7939 * This routine processes an unsolicited RSCN (Registration State Change 7940 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7941 * to invoke fc_host_post_event() routine to the FC transport layer.
If the 7942 * discovery state machine is about to begin discovery, it just accepts the 7943 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7944 * contains N_Port IDs for other vports on this HBA, it just accepts the 7945 * RSCN and ignores processing it. If the state machine is in the recovery 7946 * state, the fc_rscn_id_list of this @vport is walked and the 7947 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event for 7948 * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7949 * routine is invoked to handle the RSCN event. 7950 * 7951 * Return code 7952 * 0 - Just sent the acc response 7953 * 1 - Sent the acc response and waited for name server completion 7954 **/ 7955 static int 7956 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7957 struct lpfc_nodelist *ndlp) 7958 { 7959 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7960 struct lpfc_hba *phba = vport->phba; 7961 struct lpfc_dmabuf *pcmd; 7962 uint32_t *lp, *datap; 7963 uint32_t payload_len, length, nportid, *cmd; 7964 int rscn_cnt; 7965 int rscn_id = 0, hba_id = 0; 7966 int i, tmo; 7967 7968 pcmd = cmdiocb->cmd_dmabuf; 7969 lp = (uint32_t *) pcmd->virt; 7970 7971 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7972 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7973 /* RSCN received */ 7974 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7975 "0214 RSCN received Data: x%lx x%x x%x x%x\n", 7976 vport->fc_flag, payload_len, *lp, 7977 vport->fc_rscn_id_cnt); 7978 7979 /* Send an RSCN event to the management application */ 7980 lpfc_send_rscn_event(vport, cmdiocb); 7981 7982 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7983 fc_host_post_event(shost, fc_get_event_number(), 7984 FCH_EVT_RSCN, lp[i]); 7985 7986 /* Check if RSCN is coming from a direct-connected remote NPort */ 7987 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 7988 /* If so, just ACC it, no other action needed for now */ 7989 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7990 "2024 pt2pt RSCN %08x Data: x%lx x%x\n", 7991 *lp, vport->fc_flag, payload_len); 7992 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7993 7994 /* Check to see if we need to NVME rescan this target 7995 * remoteport. 7996 */ 7997 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7998 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7999 lpfc_nvme_rescan_port(vport, ndlp); 8000 return 0; 8001 } 8002 8003 /* If we are about to begin discovery, just ACC the RSCN. 8004 * Discovery processing will satisfy it. 8005 */ 8006 if (vport->port_state <= LPFC_NS_QRY) { 8007 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8008 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", 8009 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8010 8011 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8012 return 0; 8013 } 8014 8015 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8016 * just ACC and ignore it.
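 * Each N_Port ID in the payload is looked up with lpfc_find_vport_by_did();
 * the RSCN is ACC'd and dropped here only when every entry resolves to a
 * local vport.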
8017 */ 8018 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8019 !(vport->cfg_peer_port_login)) { 8020 i = payload_len; 8021 datap = lp; 8022 while (i > 0) { 8023 nportid = *datap++; 8024 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8025 i -= sizeof(uint32_t); 8026 rscn_id++; 8027 if (lpfc_find_vport_by_did(phba, nportid)) 8028 hba_id++; 8029 } 8030 if (rscn_id == hba_id) { 8031 /* ALL NPortIDs in RSCN are on HBA */ 8032 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8033 "0219 Ignore RSCN " 8034 "Data: x%lx x%x x%x x%x\n", 8035 vport->fc_flag, payload_len, 8036 *lp, vport->fc_rscn_id_cnt); 8037 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8038 "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", 8039 ndlp->nlp_DID, vport->port_state, 8040 ndlp->nlp_flag); 8041 8042 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8043 ndlp, NULL); 8044 /* Restart disctmo if its already running */ 8045 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8046 tmo = ((phba->fc_ratov * 3) + 3); 8047 mod_timer(&vport->fc_disctmo, 8048 jiffies + 8049 msecs_to_jiffies(1000 * tmo)); 8050 } 8051 return 0; 8052 } 8053 } 8054 8055 spin_lock_irq(shost->host_lock); 8056 if (vport->fc_rscn_flush) { 8057 /* Another thread is walking fc_rscn_id_list on this vport */ 8058 spin_unlock_irq(shost->host_lock); 8059 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 8060 /* Send back ACC */ 8061 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8062 return 0; 8063 } 8064 /* Indicate we are walking fc_rscn_id_list on this vport */ 8065 vport->fc_rscn_flush = 1; 8066 spin_unlock_irq(shost->host_lock); 8067 /* Get the array count after successfully have the token */ 8068 rscn_cnt = vport->fc_rscn_id_cnt; 8069 /* If we are already processing an RSCN, save the received 8070 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8071 */ 8072 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || 8073 test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { 8074 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8075 "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", 8076 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8077 8078 set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); 8079 8080 /* Restart disctmo if its already running */ 8081 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8082 tmo = ((phba->fc_ratov * 3) + 3); 8083 mod_timer(&vport->fc_disctmo, 8084 jiffies + msecs_to_jiffies(1000 * tmo)); 8085 } 8086 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8087 !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { 8088 set_bit(FC_RSCN_MODE, &vport->fc_flag); 8089 if (rscn_cnt) { 8090 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8091 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8092 } 8093 if ((rscn_cnt) && 8094 (payload_len + length <= LPFC_BPL_SIZE)) { 8095 *cmd &= ELS_CMD_MASK; 8096 *cmd |= cpu_to_be32(payload_len + length); 8097 memcpy(((uint8_t *)cmd) + length, lp, 8098 payload_len); 8099 } else { 8100 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8101 vport->fc_rscn_id_cnt++; 8102 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8103 * routine will not try to free it. 
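 * The buffer was saved on vport->fc_rscn_id_list above and is released
 * later by lpfc_els_flush_rscn().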
8104 */ 8105 cmdiocb->cmd_dmabuf = NULL; 8106 } 8107 /* Deferred RSCN */ 8108 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8109 "0235 Deferred RSCN " 8110 "Data: x%x x%lx x%x\n", 8111 vport->fc_rscn_id_cnt, vport->fc_flag, 8112 vport->port_state); 8113 } else { 8114 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 8115 /* ReDiscovery RSCN */ 8116 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8117 "0234 ReDiscovery RSCN " 8118 "Data: x%x x%lx x%x\n", 8119 vport->fc_rscn_id_cnt, vport->fc_flag, 8120 vport->port_state); 8121 } 8122 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8123 vport->fc_rscn_flush = 0; 8124 /* Send back ACC */ 8125 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8126 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8127 lpfc_rscn_recovery_check(vport); 8128 return 0; 8129 } 8130 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8131 "RCV RSCN: did:x%x/ste:x%x flg:x%lx", 8132 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8133 8134 set_bit(FC_RSCN_MODE, &vport->fc_flag); 8135 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8136 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8137 vport->fc_rscn_flush = 0; 8138 /* 8139 * If we zero cmdiocb->cmd_dmabuf, the calling routine will 8140 * not try to free it. 8141 */ 8142 cmdiocb->cmd_dmabuf = NULL; 8143 lpfc_set_disctmo(vport); 8144 /* Send back ACC */ 8145 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8146 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8147 lpfc_rscn_recovery_check(vport); 8148 return lpfc_els_handle_rscn(vport); 8149 } 8150 8151 /** 8152 * lpfc_els_handle_rscn - Handle rscn for a vport 8153 * @vport: pointer to a host virtual N_Port data structure. 8154 * 8155 * This routine handles the Registration State Change Notification 8156 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8157 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8158 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8159 * NameServer shall be issued. If CT command to the NameServer fails to be 8160 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8161 * RSCN activities with the @vport. 8162 * 8163 * Return code 8164 * 0 - Cleaned up rscn on the @vport 8165 * 1 - Wait for plogi to name server before proceeding 8166 **/ 8167 int 8168 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8169 { 8170 struct lpfc_nodelist *ndlp; 8171 struct lpfc_hba *phba = vport->phba; 8172 8173 /* Ignore RSCN if the port is being torn down. */ 8174 if (test_bit(FC_UNLOADING, &vport->load_flag)) { 8175 lpfc_els_flush_rscn(vport); 8176 return 0; 8177 } 8178 8179 /* Start timer for RSCN processing */ 8180 lpfc_set_disctmo(vport); 8181 8182 /* RSCN processed */ 8183 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8184 "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n", 8185 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8186 vport->port_state, vport->num_disc_nodes, 8187 vport->gidft_inp); 8188 8189 /* To process RSCN, first compare RSCN data with NameServer */ 8190 vport->fc_ns_retry = 0; 8191 vport->num_disc_nodes = 0; 8192 8193 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8194 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8195 /* Good ndlp, issue CT Request to NameServer. Need to 8196 * know how many gidfts were issued. If none, then just 8197 * flush the RSCN. Otherwise, the outstanding requests 8198 * need to complete.
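 * The query type follows phba->cfg_ns_query: GID_FT via lpfc_issue_gidft()
 * or GID_PT via lpfc_issue_gidpt().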
8199 */ 8200 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8201 if (lpfc_issue_gidft(vport) > 0) 8202 return 1; 8203 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8204 if (lpfc_issue_gidpt(vport) > 0) 8205 return 1; 8206 } else { 8207 return 1; 8208 } 8209 } else { 8210 /* Nameserver login in question. Revalidate. */ 8211 if (ndlp) { 8212 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8213 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8214 } else { 8215 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8216 if (!ndlp) { 8217 lpfc_els_flush_rscn(vport); 8218 return 0; 8219 } 8220 ndlp->nlp_prev_state = ndlp->nlp_state; 8221 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8222 } 8223 ndlp->nlp_type |= NLP_FABRIC; 8224 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8225 /* Wait for NameServer login cmpl before we can 8226 * continue 8227 */ 8228 return 1; 8229 } 8230 8231 lpfc_els_flush_rscn(vport); 8232 return 0; 8233 } 8234 8235 /** 8236 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8237 * @vport: pointer to a host virtual N_Port data structure. 8238 * @cmdiocb: pointer to lpfc command iocb data structure. 8239 * @ndlp: pointer to a node-list data structure. 8240 * 8241 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8242 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8243 * point topology. As an unsolicited FLOGI should not be received in a loop 8244 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8245 * lpfc_check_sparm() routine is invoked to check the parameters in the 8246 * unsolicited FLOGI. If parameter validation fails, the routine 8247 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8248 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8249 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8250 * will initiate PLOGI. The higher lexicographical value party shall have 8251 * higher priority (as the winning port) and will initiate PLOGI and 8252 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8253 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8254 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
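 * For example, when memcmp() of the two Port WWNs shows the local name is
 * higher, this port sets FC_PT2PT_PLOGI, adopts PT2PT_LocalID as its own
 * N_Port_ID and will send the PLOGI; when the local name is lower, the port
 * adopts PT2PT_RemoteID and waits for the remote port's PLOGI. Equal names
 * indicate an external loopback plug.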
8255 * 8256 * Return code 8257 * 0 - Successfully processed the unsolicited flogi 8258 * 1 - Failed to process the unsolicited flogi 8259 **/ 8260 static int 8261 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8262 struct lpfc_nodelist *ndlp) 8263 { 8264 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8265 struct lpfc_hba *phba = vport->phba; 8266 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8267 uint32_t *lp = (uint32_t *) pcmd->virt; 8268 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8269 struct serv_parm *sp; 8270 LPFC_MBOXQ_t *mbox; 8271 uint32_t cmd, did; 8272 int rc; 8273 unsigned long fc_flag = 0; 8274 uint32_t port_state = 0; 8275 8276 /* Clear external loopback plug detected flag */ 8277 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8278 8279 cmd = *lp++; 8280 sp = (struct serv_parm *) lp; 8281 8282 /* FLOGI received */ 8283 8284 lpfc_set_disctmo(vport); 8285 8286 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8287 /* We should never receive a FLOGI in loop mode, ignore it */ 8288 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8289 8290 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8291 Loop Mode */ 8292 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8293 "0113 An FLOGI ELS command x%x was " 8294 "received from DID x%x in Loop Mode\n", 8295 cmd, did); 8296 return 1; 8297 } 8298 8299 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8300 8301 /* 8302 * If our portname is greater than the remote portname, 8303 * then we initiate Nport login. 8304 */ 8305 8306 rc = memcmp(&vport->fc_portname, &sp->portName, 8307 sizeof(struct lpfc_name)); 8308 8309 if (!rc) { 8310 if (phba->sli_rev < LPFC_SLI_REV4) { 8311 mbox = mempool_alloc(phba->mbox_mem_pool, 8312 GFP_KERNEL); 8313 if (!mbox) 8314 return 1; 8315 lpfc_linkdown(phba); 8316 lpfc_init_link(phba, mbox, 8317 phba->cfg_topology, 8318 phba->cfg_link_speed); 8319 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8320 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8321 mbox->vport = vport; 8322 rc = lpfc_sli_issue_mbox(phba, mbox, 8323 MBX_NOWAIT); 8324 lpfc_set_loopback_flag(phba); 8325 if (rc == MBX_NOT_FINISHED) 8326 mempool_free(mbox, phba->mbox_mem_pool); 8327 return 1; 8328 } 8329 8330 /* External loopback plug insertion detected */ 8331 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8332 8333 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8334 "1119 External Loopback plug detected\n"); 8335 8336 /* abort the flogi coming back to ourselves 8337 * due to external loopback on the port. 8338 */ 8339 lpfc_els_abort_flogi(phba); 8340 return 0; 8341 8342 } else if (rc > 0) { /* greater than */ 8343 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag); 8344 8345 /* If we have the high WWPN we can assign our own 8346 * myDID; otherwise, we have to WAIT for a PLOGI 8347 * from the remote NPort to find out what it 8348 * will be. 8349 */ 8350 vport->fc_myDID = PT2PT_LocalID; 8351 } else { 8352 vport->fc_myDID = PT2PT_RemoteID; 8353 } 8354 8355 /* 8356 * The vport state should go to LPFC_FLOGI only 8357 * AFTER we issue a FLOGI, not receive one. 8358 */ 8359 spin_lock_irq(shost->host_lock); 8360 fc_flag = vport->fc_flag; 8361 port_state = vport->port_state; 8362 /* Acking an unsol FLOGI. Count 1 for link bounce 8363 * work-around. 
8364 */ 8365 vport->rcv_flogi_cnt++; 8366 spin_unlock_irq(shost->host_lock); 8367 set_bit(FC_PT2PT, &vport->fc_flag); 8368 clear_bit(FC_FABRIC, &vport->fc_flag); 8369 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 8370 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8371 "3311 Rcv Flogi PS x%x new PS x%x " 8372 "fc_flag x%lx new fc_flag x%lx\n", 8373 port_state, vport->port_state, 8374 fc_flag, vport->fc_flag); 8375 8376 /* 8377 * We temporarily set fc_myDID to make it look like we are 8378 * a Fabric. This is done just so we end up with the right 8379 * did / sid on the FLOGI ACC rsp. 8380 */ 8381 did = vport->fc_myDID; 8382 vport->fc_myDID = Fabric_DID; 8383 8384 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8385 8386 /* Defer ACC response until AFTER we issue a FLOGI */ 8387 if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { 8388 phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag, 8389 &wqe->xmit_els_rsp.wqe_com); 8390 phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid, 8391 &wqe->xmit_els_rsp.wqe_com); 8392 8393 vport->fc_myDID = did; 8394 8395 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8396 "3344 Deferring FLOGI ACC: rx_id: x%x," 8397 " ox_id: x%x, hba_flag x%lx\n", 8398 phba->defer_flogi_acc.rx_id, 8399 phba->defer_flogi_acc.ox_id, phba->hba_flag); 8400 8401 phba->defer_flogi_acc.flag = true; 8402 8403 /* This nlp_get is paired with nlp_puts that reset the 8404 * defer_flogi_acc.flag back to false. We need to retain 8405 * a kref on the ndlp until the deferred FLOGI ACC is 8406 * processed or cancelled. 8407 */ 8408 phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp); 8409 return 0; 8410 } 8411 8412 /* Send back ACC */ 8413 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8414 8415 /* Now let's put fc_myDID back to what it's supposed to be */ 8416 vport->fc_myDID = did; 8417 8418 return 0; 8419 } 8420 8421 /** 8422 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8423 * @vport: pointer to a host virtual N_Port data structure. 8424 * @cmdiocb: pointer to lpfc command iocb data structure. 8425 * @ndlp: pointer to a node-list data structure. 8426 * 8427 * This routine processes Request Node Identification Data (RNID) IOCB 8428 * received as an ELS unsolicited event. Only when the RNID specifies format 8429 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8430 * does this routine invoke the lpfc_els_rsp_rnid_acc() routine to 8431 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8432 * rejected by invoking the lpfc_els_rsp_reject() routine.
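 * (Format 0xDF is handled as the RNID_TOPOLOGY_DISC case in the switch
 * below; all other formats take the reject path.)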
8433 * 8434 * Return code 8435 * 0 - Successfully processed rnid iocb (currently always return 0) 8436 **/ 8437 static int 8438 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8439 struct lpfc_nodelist *ndlp) 8440 { 8441 struct lpfc_dmabuf *pcmd; 8442 uint32_t *lp; 8443 RNID *rn; 8444 struct ls_rjt stat; 8445 8446 pcmd = cmdiocb->cmd_dmabuf; 8447 lp = (uint32_t *) pcmd->virt; 8448 8449 lp++; 8450 rn = (RNID *) lp; 8451 8452 /* RNID received */ 8453 8454 switch (rn->Format) { 8455 case 0: 8456 case RNID_TOPOLOGY_DISC: 8457 /* Send back ACC */ 8458 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8459 break; 8460 default: 8461 /* Reject this request because format not supported */ 8462 stat.un.b.lsRjtRsvd0 = 0; 8463 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8464 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8465 stat.un.b.vendorUnique = 0; 8466 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8467 NULL); 8468 } 8469 return 0; 8470 } 8471 8472 /** 8473 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8474 * @vport: pointer to a host virtual N_Port data structure. 8475 * @cmdiocb: pointer to lpfc command iocb data structure. 8476 * @ndlp: pointer to a node-list data structure. 8477 * 8478 * Return code 8479 * 0 - Successfully processed echo iocb (currently always return 0) 8480 **/ 8481 static int 8482 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8483 struct lpfc_nodelist *ndlp) 8484 { 8485 uint8_t *pcmd; 8486 8487 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8488 8489 /* skip over first word of echo command to find echo data */ 8490 pcmd += sizeof(uint32_t); 8491 8492 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8493 return 0; 8494 } 8495 8496 /** 8497 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8498 * @vport: pointer to a host virtual N_Port data structure. 8499 * @cmdiocb: pointer to lpfc command iocb data structure. 8500 * @ndlp: pointer to a node-list data structure. 8501 * 8502 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8503 * received as an ELS unsolicited event. Currently, this function just invokes 8504 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8505 * 8506 * Return code 8507 * 0 - Successfully processed lirr iocb (currently always return 0) 8508 **/ 8509 static int 8510 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8511 struct lpfc_nodelist *ndlp) 8512 { 8513 struct ls_rjt stat; 8514 8515 /* For now, unconditionally reject this command */ 8516 stat.un.b.lsRjtRsvd0 = 0; 8517 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8518 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8519 stat.un.b.vendorUnique = 0; 8520 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8521 return 0; 8522 } 8523 8524 /** 8525 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8526 * @vport: pointer to a host virtual N_Port data structure. 8527 * @cmdiocb: pointer to lpfc command iocb data structure. 8528 * @ndlp: pointer to a node-list data structure. 8529 * 8530 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8531 * received as an ELS unsolicited event. A request to RRQ shall only 8532 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8533 * Nx_Port N_Port_ID of the target Exchange is the same as the 8534 * N_Port_ID of the Nx_Port that makes the request. 
If the RRQ is 8535 * not accepted, an LS_RJT with reason code "Unable to perform 8536 * command request" and reason code explanation "Invalid Originator 8537 * S_ID" shall be returned. For now, we just unconditionally accept 8538 * RRQ from the target. 8539 **/ 8540 static void 8541 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8542 struct lpfc_nodelist *ndlp) 8543 { 8544 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8545 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8546 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8547 } 8548 8549 /** 8550 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8551 * @phba: pointer to lpfc hba data structure. 8552 * @pmb: pointer to the driver internal queue element for mailbox command. 8553 * 8554 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8555 * mailbox command. This callback function is to actually send the Accept 8556 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8557 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8558 * mailbox command, constructs the RLS response with the link statistics 8559 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8560 * response to the RLS. 8561 * 8562 * Note that the ndlp reference count will be incremented by 1 for holding the 8563 * ndlp and the reference to ndlp will be stored into the ndlp field of 8564 * the IOCB for the completion callback function to the RLS Accept Response 8565 * ELS IOCB command. 8566 * 8567 **/ 8568 static void 8569 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8570 { 8571 int rc = 0; 8572 MAILBOX_t *mb; 8573 IOCB_t *icmd; 8574 union lpfc_wqe128 *wqe; 8575 struct RLS_RSP *rls_rsp; 8576 uint8_t *pcmd; 8577 struct lpfc_iocbq *elsiocb; 8578 struct lpfc_nodelist *ndlp; 8579 uint16_t oxid; 8580 uint16_t rxid; 8581 uint32_t cmdsize; 8582 u32 ulp_context; 8583 8584 mb = &pmb->u.mb; 8585 8586 ndlp = pmb->ctx_ndlp; 8587 rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff); 8588 oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff); 8589 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 8590 pmb->ctx_ndlp = NULL; 8591 8592 if (mb->mbxStatus) { 8593 mempool_free(pmb, phba->mbox_mem_pool); 8594 return; 8595 } 8596 8597 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8598 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8599 lpfc_max_els_tries, ndlp, 8600 ndlp->nlp_DID, ELS_CMD_ACC); 8601 8602 /* Decrement the ndlp reference count from previous mbox command */ 8603 lpfc_nlp_put(ndlp); 8604 8605 if (!elsiocb) { 8606 mempool_free(pmb, phba->mbox_mem_pool); 8607 return; 8608 } 8609 8610 ulp_context = get_job_ulpcontext(phba, elsiocb); 8611 if (phba->sli_rev == LPFC_SLI_REV4) { 8612 wqe = &elsiocb->wqe; 8613 /* Xri / rx_id */ 8614 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8615 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8616 } else { 8617 icmd = &elsiocb->iocb; 8618 icmd->ulpContext = rxid; 8619 icmd->unsli3.rcvsli3.ox_id = oxid; 8620 } 8621 8622 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8623 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8624 pcmd += sizeof(uint32_t); /* Skip past command */ 8625 rls_rsp = (struct RLS_RSP *)pcmd; 8626 8627 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8628 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8629 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8630 rls_rsp->primSeqErrCnt = 
cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8631 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8632 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8633 mempool_free(pmb, phba->mbox_mem_pool); 8634 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8635 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8636 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8637 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", 8638 elsiocb->iotag, ulp_context, 8639 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8640 ndlp->nlp_rpi); 8641 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8642 phba->fc_stat.elsXmitACC++; 8643 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8644 if (!elsiocb->ndlp) { 8645 lpfc_els_free_iocb(phba, elsiocb); 8646 return; 8647 } 8648 8649 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8650 if (rc == IOCB_ERROR) { 8651 lpfc_els_free_iocb(phba, elsiocb); 8652 lpfc_nlp_put(ndlp); 8653 } 8654 return; 8655 } 8656 8657 /** 8658 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8659 * @vport: pointer to a host virtual N_Port data structure. 8660 * @cmdiocb: pointer to lpfc command iocb data structure. 8661 * @ndlp: pointer to a node-list data structure. 8662 * 8663 * This routine processes a Read Link Status (RLS) IOCB received as an 8664 * ELS unsolicited event. It first checks the remote port state. If the 8665 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8666 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8667 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8668 * for reading the HBA link statistics. The callback function, 8669 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 8670 * actually sends out the RLS Accept (ACC) response. 8671 * 8672 * Return codes 8673 * 0 - Successfully processed rls iocb (currently always return 0) 8674 **/ 8675 static int 8676 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8677 struct lpfc_nodelist *ndlp) 8678 { 8679 struct lpfc_hba *phba = vport->phba; 8680 LPFC_MBOXQ_t *mbox; 8681 struct ls_rjt stat; 8682 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8683 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8684 8685 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8686 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8687 /* reject the unsolicited RLS request and done with it */ 8688 goto reject_out; 8689 8690 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8691 if (mbox) { 8692 lpfc_read_lnk_stat(phba, mbox); 8693 mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx; 8694 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8695 if (!mbox->ctx_ndlp) 8696 goto node_err; 8697 mbox->vport = vport; 8698 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8699 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8700 != MBX_NOT_FINISHED) 8701 /* Mbox completion will send ELS Response */ 8702 return 0; 8703 /* Decrement reference count used for the failed mbox 8704 * command. 8705 */ 8706 lpfc_nlp_put(ndlp); 8707 node_err: 8708 mempool_free(mbox, phba->mbox_mem_pool); 8709 } 8710 reject_out: 8711 /* issue rejection response */ 8712 stat.un.b.lsRjtRsvd0 = 0; 8713 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8714 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8715 stat.un.b.vendorUnique = 0; 8716 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8717 return 0; 8718 } 8719 8720 /** 8721 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8722 * @vport: pointer to a host virtual N_Port data structure.
8723 * @cmdiocb: pointer to lpfc command iocb data structure. 8724 * @ndlp: pointer to a node-list data structure. 8725 * 8726 * This routine processes a Read Timeout Value (RTV) IOCB received as an 8727 * ELS unsolicited event. It first checks the remote port state. If the 8728 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8729 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8730 * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout 8731 * Value (RTV) unsolicited IOCB event. 8732 * 8733 * Note that the ndlp reference count will be incremented by 1 for holding the 8734 * ndlp and the reference to ndlp will be stored into the ndlp field of 8735 * the IOCB for the completion callback function to the RTV Accept Response 8736 * ELS IOCB command. 8737 * 8738 * Return codes 8739 * 0 - Successfully processed rtv iocb (currently always return 0) 8740 **/ 8741 static int 8742 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8743 struct lpfc_nodelist *ndlp) 8744 { 8745 int rc = 0; 8746 IOCB_t *icmd; 8747 union lpfc_wqe128 *wqe; 8748 struct lpfc_hba *phba = vport->phba; 8749 struct ls_rjt stat; 8750 struct RTV_RSP *rtv_rsp; 8751 uint8_t *pcmd; 8752 struct lpfc_iocbq *elsiocb; 8753 uint32_t cmdsize; 8754 u32 ulp_context; 8755 8756 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8757 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8758 /* reject the unsolicited RTV request and done with it */ 8759 goto reject_out; 8760 8761 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8762 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8763 lpfc_max_els_tries, ndlp, 8764 ndlp->nlp_DID, ELS_CMD_ACC); 8765 8766 if (!elsiocb) 8767 return 1; 8768 8769 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8770 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8771 pcmd += sizeof(uint32_t); /* Skip past command */ 8772 8773 ulp_context = get_job_ulpcontext(phba, elsiocb); 8774 /* use the command's xri in the response */ 8775 if (phba->sli_rev == LPFC_SLI_REV4) { 8776 wqe = &elsiocb->wqe; 8777 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8778 get_job_ulpcontext(phba, cmdiocb)); 8779 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8780 get_job_rcvoxid(phba, cmdiocb)); 8781 } else { 8782 icmd = &elsiocb->iocb; 8783 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8784 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8785 } 8786 8787 rtv_rsp = (struct RTV_RSP *)pcmd; 8788 8789 /* populate RTV payload */ 8790 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8791 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8792 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ?
1 : 0); 8793 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8794 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8795 8796 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8797 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8798 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8799 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " 8800 "Data: x%x x%x x%x\n", 8801 elsiocb->iotag, ulp_context, 8802 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8803 ndlp->nlp_rpi, 8804 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8805 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8806 phba->fc_stat.elsXmitACC++; 8807 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8808 if (!elsiocb->ndlp) { 8809 lpfc_els_free_iocb(phba, elsiocb); 8810 return 0; 8811 } 8812 8813 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8814 if (rc == IOCB_ERROR) { 8815 lpfc_els_free_iocb(phba, elsiocb); 8816 lpfc_nlp_put(ndlp); 8817 } 8818 return 0; 8819 8820 reject_out: 8821 /* issue rejection response */ 8822 stat.un.b.lsRjtRsvd0 = 0; 8823 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8824 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8825 stat.un.b.vendorUnique = 0; 8826 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8827 return 0; 8828 } 8829 8830 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command 8831 * @vport: pointer to a host virtual N_Port data structure. 8832 * @ndlp: pointer to a node-list data structure. 8833 * @did: DID of the target. 8834 * @rrq: Pointer to the rrq struct. 8835 * 8836 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8837 * successful, the completion handler will clear the RRQ. 8838 * 8839 * Return codes 8840 * 0 - Successfully sent rrq els iocb. 8841 * 1 - Failed to send rrq els iocb. 8842 **/ 8843 static int 8844 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8845 uint32_t did, struct lpfc_node_rrq *rrq) 8846 { 8847 struct lpfc_hba *phba = vport->phba; 8848 struct RRQ *els_rrq; 8849 struct lpfc_iocbq *elsiocb; 8850 uint8_t *pcmd; 8851 uint16_t cmdsize; 8852 int ret; 8853 8854 if (!ndlp) 8855 return 1; 8856 8857 /* If ndlp is not NULL, we will bump the reference count on it */ 8858 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8859 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8860 ELS_CMD_RRQ); 8861 if (!elsiocb) 8862 return 1; 8863 8864 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8865 8866 /* For RRQ request, remainder of payload is Exchange IDs */ 8867 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8868 pcmd += sizeof(uint32_t); 8869 els_rrq = (struct RRQ *) pcmd; 8870 8871 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8872 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8873 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8874 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8875 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8876 8877 8878 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8879 "Issue RRQ: did:x%x", 8880 did, rrq->xritag, rrq->rxid); 8881 elsiocb->context_un.rrq = rrq; 8882 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8883 8884 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8885 if (!elsiocb->ndlp) 8886 goto io_err; 8887 8888 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8889 if (ret == IOCB_ERROR) { 8890 lpfc_nlp_put(ndlp); 8891 goto io_err; 8892 } 8893 return 0; 8894 8895 io_err: 8896 lpfc_els_free_iocb(phba, elsiocb); 8897 return 1; 8898 } 8899 8900 /** 8901 * lpfc_send_rrq - Sends ELS RRQ if needed. 8902 * @phba: pointer to lpfc hba data structure.
8903 * @rrq: pointer to the active rrq. 8904 * 8905 * This routine will call the lpfc_issue_els_rrq if the rrq is 8906 * still active for the xri. If this function returns a failure then 8907 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8908 * 8909 * Returns 0 Success. 8910 * 1 Failure. 8911 **/ 8912 int 8913 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8914 { 8915 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8916 rrq->nlp_DID); 8917 if (!ndlp) 8918 return 1; 8919 8920 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8921 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8922 rrq->nlp_DID, rrq); 8923 else 8924 return 1; 8925 } 8926 8927 /** 8928 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8929 * @vport: pointer to a host virtual N_Port data structure. 8930 * @cmdsize: size of the ELS command. 8931 * @oldiocb: pointer to the original lpfc command iocb data structure. 8932 * @ndlp: pointer to a node-list data structure. 8933 * 8934 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8935 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8936 * 8937 * Note that the ndlp reference count will be incremented by 1 for holding the 8938 * ndlp and the reference to ndlp will be stored into the ndlp field of 8939 * the IOCB for the completion callback function to the RPL Accept Response 8940 * ELS command. 8941 * 8942 * Return code 8943 * 0 - Successfully issued ACC RPL ELS command 8944 * 1 - Failed to issue ACC RPL ELS command 8945 **/ 8946 static int 8947 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8948 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8949 { 8950 int rc = 0; 8951 struct lpfc_hba *phba = vport->phba; 8952 IOCB_t *icmd; 8953 union lpfc_wqe128 *wqe; 8954 RPL_RSP rpl_rsp; 8955 struct lpfc_iocbq *elsiocb; 8956 uint8_t *pcmd; 8957 u32 ulp_context; 8958 8959 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8960 ndlp->nlp_DID, ELS_CMD_ACC); 8961 8962 if (!elsiocb) 8963 return 1; 8964 8965 ulp_context = get_job_ulpcontext(phba, elsiocb); 8966 if (phba->sli_rev == LPFC_SLI_REV4) { 8967 wqe = &elsiocb->wqe; 8968 /* Xri / rx_id */ 8969 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8970 get_job_ulpcontext(phba, oldiocb)); 8971 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8972 get_job_rcvoxid(phba, oldiocb)); 8973 } else { 8974 icmd = &elsiocb->iocb; 8975 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 8976 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 8977 } 8978 8979 pcmd = elsiocb->cmd_dmabuf->virt; 8980 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8981 pcmd += sizeof(uint16_t); 8982 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8983 pcmd += sizeof(uint16_t); 8984 8985 /* Setup the RPL ACC payload */ 8986 rpl_rsp.listLen = be32_to_cpu(1); 8987 rpl_rsp.index = 0; 8988 rpl_rsp.port_num_blk.portNum = 0; 8989 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8990 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8991 sizeof(struct lpfc_name)); 8992 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8993 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8994 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8995 "0120 Xmit ELS RPL ACC response tag x%x " 8996 "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " 8997 "rpi x%x\n", 8998 elsiocb->iotag, ulp_context, 8999 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9000 ndlp->nlp_rpi); 9001 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9002 phba->fc_stat.elsXmitACC++; 9003 
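	/* Take an extra reference on the ndlp so the node stays valid while
	 * the RPL ACC response is outstanding; the reference is dropped by
	 * the response completion handler, or immediately below if the IOCB
	 * cannot be issued.
	 */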
elsiocb->ndlp = lpfc_nlp_get(ndlp); 9004 if (!elsiocb->ndlp) { 9005 lpfc_els_free_iocb(phba, elsiocb); 9006 return 1; 9007 } 9008 9009 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9010 if (rc == IOCB_ERROR) { 9011 lpfc_els_free_iocb(phba, elsiocb); 9012 lpfc_nlp_put(ndlp); 9013 return 1; 9014 } 9015 9016 return 0; 9017 } 9018 9019 /** 9020 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9021 * @vport: pointer to a host virtual N_Port data structure. 9022 * @cmdiocb: pointer to lpfc command iocb data structure. 9023 * @ndlp: pointer to a node-list data structure. 9024 * 9025 * This routine processes Read Port List (RPL) IOCB received as an ELS 9026 * unsolicited event. It first checks the remote port state. If the remote 9027 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9028 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9029 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9030 * to accept the RPL. 9031 * 9032 * Return code 9033 * 0 - Successfully processed rpl iocb (currently always return 0) 9034 **/ 9035 static int 9036 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9037 struct lpfc_nodelist *ndlp) 9038 { 9039 struct lpfc_dmabuf *pcmd; 9040 uint32_t *lp; 9041 uint32_t maxsize; 9042 uint16_t cmdsize; 9043 RPL *rpl; 9044 struct ls_rjt stat; 9045 9046 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9047 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9048 /* issue rejection response */ 9049 stat.un.b.lsRjtRsvd0 = 0; 9050 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9051 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9052 stat.un.b.vendorUnique = 0; 9053 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9054 NULL); 9055 /* rejected the unsolicited RPL request and done with it */ 9056 return 0; 9057 } 9058 9059 pcmd = cmdiocb->cmd_dmabuf; 9060 lp = (uint32_t *) pcmd->virt; 9061 rpl = (RPL *) (lp + 1); 9062 maxsize = be32_to_cpu(rpl->maxsize); 9063 9064 /* We support only one port */ 9065 if ((rpl->index == 0) && 9066 ((maxsize == 0) || 9067 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9068 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9069 } else { 9070 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9071 } 9072 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9073 9074 return 0; 9075 } 9076 9077 /** 9078 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9079 * @vport: pointer to a virtual N_Port data structure. 9080 * @cmdiocb: pointer to lpfc command iocb data structure. 9081 * @ndlp: pointer to a node-list data structure. 9082 * 9083 * This routine processes Fibre Channel Address Resolution Protocol 9084 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9085 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9086 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9087 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9088 * remote PortName is compared against the FC PortName stored in the @vport 9089 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9090 * compared against the FC NodeName stored in the @vport data structure. 9091 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9092 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9093 * invoked to send out FARP Response to the remote node. 
Before sending the 9094 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9095 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9096 * routine is invoked to log into the remote port first. 9097 * 9098 * Return code 9099 * 0 - Either the FARP Match Mode not supported or successfully processed 9100 **/ 9101 static int 9102 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9103 struct lpfc_nodelist *ndlp) 9104 { 9105 struct lpfc_dmabuf *pcmd; 9106 uint32_t *lp; 9107 FARP *fp; 9108 uint32_t cnt, did; 9109 9110 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9111 pcmd = cmdiocb->cmd_dmabuf; 9112 lp = (uint32_t *) pcmd->virt; 9113 9114 lp++; 9115 fp = (FARP *) lp; 9116 /* FARP-REQ received from DID <did> */ 9117 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9118 "0601 FARP-REQ received from DID x%x\n", did); 9119 /* We will only support match on WWPN or WWNN */ 9120 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9121 return 0; 9122 } 9123 9124 cnt = 0; 9125 /* If this FARP command is searching for my portname */ 9126 if (fp->Mflags & FARP_MATCH_PORT) { 9127 if (memcmp(&fp->RportName, &vport->fc_portname, 9128 sizeof(struct lpfc_name)) == 0) 9129 cnt = 1; 9130 } 9131 9132 /* If this FARP command is searching for my nodename */ 9133 if (fp->Mflags & FARP_MATCH_NODE) { 9134 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9135 sizeof(struct lpfc_name)) == 0) 9136 cnt = 1; 9137 } 9138 9139 if (cnt) { 9140 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9141 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9142 /* Log back into the node before sending the FARP. */ 9143 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9144 ndlp->nlp_prev_state = ndlp->nlp_state; 9145 lpfc_nlp_set_state(vport, ndlp, 9146 NLP_STE_PLOGI_ISSUE); 9147 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9148 } 9149 9150 /* Send a FARP response to that node */ 9151 if (fp->Rflags & FARP_REQUEST_FARPR) 9152 lpfc_issue_els_farpr(vport, did, 0); 9153 } 9154 } 9155 return 0; 9156 } 9157 9158 /** 9159 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9160 * @vport: pointer to a host virtual N_Port data structure. 9161 * @cmdiocb: pointer to lpfc command iocb data structure. 9162 * @ndlp: pointer to a node-list data structure. 9163 * 9164 * This routine processes Fibre Channel Address Resolution Protocol 9165 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9166 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9167 * the FARP response request. 9168 * 9169 * Return code 9170 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9171 **/ 9172 static int 9173 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9174 struct lpfc_nodelist *ndlp) 9175 { 9176 uint32_t did; 9177 9178 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9179 9180 /* FARP-RSP received from DID <did> */ 9181 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9182 "0600 FARP-RSP received from DID x%x\n", did); 9183 /* ACCEPT the Farp resp request */ 9184 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9185 9186 return 0; 9187 } 9188 9189 /** 9190 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9191 * @vport: pointer to a host virtual N_Port data structure. 9192 * @cmdiocb: pointer to lpfc command iocb data structure. 9193 * @fan_ndlp: pointer to a node-list data structure. 
9194 * 9195 * This routine processes a Fabric Address Notification (FAN) IOCB 9196 * command received as an ELS unsolicited event. The FAN ELS command will 9197 * only be processed on a physical port (i.e., the @vport represents the 9198 * physical port). The fabric NodeName and PortName from the FAN IOCB are 9199 * compared against those in the phba data structure. If either of those is 9200 * different, the port has switched fabrics and the lpfc_issue_init_vfi() 9201 * routine is invoked to start discovery over with a new Fabric Login (FLOGI). 9202 * Otherwise, if both are identical, fabric login is registered by invoking 9203 * the lpfc_issue_fabric_reglogin() routine (lpfc_issue_reg_vfi() on SLI4). 9204 * 9205 * Return code 9206 * 0 - Successfully processed fan iocb (currently always return 0). 9207 **/ 9208 static int 9209 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9210 struct lpfc_nodelist *fan_ndlp) 9211 { 9212 struct lpfc_hba *phba = vport->phba; 9213 uint32_t *lp; 9214 FAN *fp; 9215 9216 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9217 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 9218 fp = (FAN *) ++lp; 9219 /* FAN received; Fan does not have a reply sequence */ 9220 if ((vport == phba->pport) && 9221 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9222 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9223 sizeof(struct lpfc_name))) || 9224 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9225 sizeof(struct lpfc_name)))) { 9226 /* This port has switched fabrics. FLOGI is required */ 9227 lpfc_issue_init_vfi(vport); 9228 } else { 9229 /* FAN verified - skip FLOGI */ 9230 vport->fc_myDID = vport->fc_prevDID; 9231 if (phba->sli_rev < LPFC_SLI_REV4) 9232 lpfc_issue_fabric_reglogin(vport); 9233 else { 9234 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9235 "3138 Need register VFI: (x%x/%x)\n", 9236 vport->fc_prevDID, vport->fc_myDID); 9237 lpfc_issue_reg_vfi(vport); 9238 } 9239 } 9240 } 9241 return 0; 9242 } 9243 9244 /** 9245 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 9246 * @vport: pointer to a host virtual N_Port data structure. 9247 * @cmdiocb: pointer to lpfc command iocb data structure. 9248 * @ndlp: pointer to a node-list data structure.
9249 * 9250 * Return code 9251 * 0 - Successfully processed echo iocb (currently always return 0) 9252 **/ 9253 static int 9254 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9255 struct lpfc_nodelist *ndlp) 9256 { 9257 struct lpfc_hba *phba = vport->phba; 9258 struct fc_els_edc *edc_req; 9259 struct fc_tlv_desc *tlv; 9260 uint8_t *payload; 9261 uint32_t *ptr, dtag; 9262 const char *dtag_nm; 9263 int desc_cnt = 0, bytes_remain; 9264 struct fc_diag_lnkflt_desc *plnkflt; 9265 9266 payload = cmdiocb->cmd_dmabuf->virt; 9267 9268 edc_req = (struct fc_els_edc *)payload; 9269 bytes_remain = be32_to_cpu(edc_req->desc_len); 9270 9271 ptr = (uint32_t *)payload; 9272 lpfc_printf_vlog(vport, KERN_INFO, 9273 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9274 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9275 bytes_remain, be32_to_cpu(*ptr), 9276 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9277 9278 /* No signal support unless there is a congestion descriptor */ 9279 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9280 phba->cgn_sig_freq = 0; 9281 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9282 9283 if (bytes_remain <= 0) 9284 goto out; 9285 9286 tlv = edc_req->desc; 9287 9288 /* 9289 * cycle through EDC diagnostic descriptors to find the 9290 * congestion signaling capability descriptor 9291 */ 9292 while (bytes_remain) { 9293 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9294 lpfc_printf_log(phba, KERN_WARNING, 9295 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9296 "6464 Truncated TLV hdr on " 9297 "Diagnostic descriptor[%d]\n", 9298 desc_cnt); 9299 goto out; 9300 } 9301 9302 dtag = be32_to_cpu(tlv->desc_tag); 9303 switch (dtag) { 9304 case ELS_DTAG_LNK_FAULT_CAP: 9305 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9306 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9307 sizeof(struct fc_diag_lnkflt_desc)) { 9308 lpfc_printf_log(phba, KERN_WARNING, 9309 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9310 "6465 Truncated Link Fault Diagnostic " 9311 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9312 desc_cnt, bytes_remain, 9313 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9314 sizeof(struct fc_diag_lnkflt_desc)); 9315 goto out; 9316 } 9317 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9318 lpfc_printf_log(phba, KERN_INFO, 9319 LOG_ELS | LOG_LDS_EVENT, 9320 "4626 Link Fault Desc Data: x%08x len x%x " 9321 "da x%x dd x%x interval x%x\n", 9322 be32_to_cpu(plnkflt->desc_tag), 9323 be32_to_cpu(plnkflt->desc_len), 9324 be32_to_cpu( 9325 plnkflt->degrade_activate_threshold), 9326 be32_to_cpu( 9327 plnkflt->degrade_deactivate_threshold), 9328 be32_to_cpu(plnkflt->fec_degrade_interval)); 9329 break; 9330 case ELS_DTAG_CG_SIGNAL_CAP: 9331 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9332 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9333 sizeof(struct fc_diag_cg_sig_desc)) { 9334 lpfc_printf_log( 9335 phba, KERN_WARNING, LOG_CGN_MGMT, 9336 "6466 Truncated cgn signal Diagnostic " 9337 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9338 desc_cnt, bytes_remain, 9339 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9340 sizeof(struct fc_diag_cg_sig_desc)); 9341 goto out; 9342 } 9343 9344 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9345 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9346 9347 /* We start negotiation with lpfc_fabric_cgn_frequency. 9348 * When we process the EDC, we will settle on the 9349 * higher frequency. 
9350 */ 9351 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9352 9353 lpfc_least_capable_settings( 9354 phba, (struct fc_diag_cg_sig_desc *)tlv); 9355 break; 9356 default: 9357 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9358 lpfc_printf_log(phba, KERN_WARNING, 9359 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9360 "6467 unknown Diagnostic " 9361 "Descriptor[%d]: tag x%x (%s)\n", 9362 desc_cnt, dtag, dtag_nm); 9363 } 9364 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9365 tlv = fc_tlv_next_desc(tlv); 9366 desc_cnt++; 9367 } 9368 out: 9369 /* Need to send back an ACC */ 9370 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9371 9372 lpfc_config_cgn_signal(phba); 9373 return 0; 9374 } 9375 9376 /** 9377 * lpfc_els_timeout - Handler function for the ELS timer 9378 * @t: timer context used to obtain the vport. 9379 * 9380 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9381 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 9382 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9383 * up the worker thread. The worker thread then invokes the routine 9384 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 9385 **/ 9386 void 9387 lpfc_els_timeout(struct timer_list *t) 9388 { 9389 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9390 struct lpfc_hba *phba = vport->phba; 9391 uint32_t tmo_posted; 9392 unsigned long iflag; 9393 9394 spin_lock_irqsave(&vport->work_port_lock, iflag); 9395 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9396 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag)) 9397 vport->work_port_events |= WORKER_ELS_TMO; 9398 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9399 9400 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag)) 9401 lpfc_worker_wake_up(phba); 9402 return; 9403 } 9404 9405 9406 /** 9407 * lpfc_els_timeout_handler - Process an els timeout event 9408 * @vport: pointer to a virtual N_Port data structure. 9409 * 9410 * This routine is the actual handler function that processes an ELS timeout 9411 * event. It walks the ELS ring and aborts all the IOCBs associated with the 9412 * @vport (except ABORT/CLOSE/FARP/FARPR/FDISC) by invoking the 9413 * lpfc_sli_issue_abort_iotag() routine.
9414 **/ 9415 void 9416 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9417 { 9418 struct lpfc_hba *phba = vport->phba; 9419 struct lpfc_sli_ring *pring; 9420 struct lpfc_iocbq *tmp_iocb, *piocb; 9421 IOCB_t *cmd = NULL; 9422 struct lpfc_dmabuf *pcmd; 9423 uint32_t els_command = 0; 9424 uint32_t timeout; 9425 uint32_t remote_ID = 0xffffffff; 9426 LIST_HEAD(abort_list); 9427 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9428 9429 9430 timeout = (uint32_t)(phba->fc_ratov << 1); 9431 9432 pring = lpfc_phba_elsring(phba); 9433 if (unlikely(!pring)) 9434 return; 9435 9436 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9437 return; 9438 9439 spin_lock_irq(&phba->hbalock); 9440 if (phba->sli_rev == LPFC_SLI_REV4) 9441 spin_lock(&pring->ring_lock); 9442 9443 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9444 ulp_command = get_job_cmnd(phba, piocb); 9445 ulp_context = get_job_ulpcontext(phba, piocb); 9446 did = get_job_els_rsp64_did(phba, piocb); 9447 9448 if (phba->sli_rev == LPFC_SLI_REV4) { 9449 iotag = get_wqe_reqtag(piocb); 9450 } else { 9451 cmd = &piocb->iocb; 9452 iotag = cmd->ulpIoTag; 9453 } 9454 9455 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9456 ulp_command == CMD_ABORT_XRI_CX || 9457 ulp_command == CMD_ABORT_XRI_CN || 9458 ulp_command == CMD_CLOSE_XRI_CN) 9459 continue; 9460 9461 if (piocb->vport != vport) 9462 continue; 9463 9464 pcmd = piocb->cmd_dmabuf; 9465 if (pcmd) 9466 els_command = *(uint32_t *) (pcmd->virt); 9467 9468 if (els_command == ELS_CMD_FARP || 9469 els_command == ELS_CMD_FARPR || 9470 els_command == ELS_CMD_FDISC) 9471 continue; 9472 9473 if (piocb->drvrTimeout > 0) { 9474 if (piocb->drvrTimeout >= timeout) 9475 piocb->drvrTimeout -= timeout; 9476 else 9477 piocb->drvrTimeout = 0; 9478 continue; 9479 } 9480 9481 remote_ID = 0xffffffff; 9482 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9483 remote_ID = did; 9484 } else { 9485 struct lpfc_nodelist *ndlp; 9486 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9487 if (ndlp) 9488 remote_ID = ndlp->nlp_DID; 9489 } 9490 list_add_tail(&piocb->dlist, &abort_list); 9491 } 9492 if (phba->sli_rev == LPFC_SLI_REV4) 9493 spin_unlock(&pring->ring_lock); 9494 spin_unlock_irq(&phba->hbalock); 9495 9496 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9497 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9498 "0127 ELS timeout Data: x%x x%x x%x " 9499 "x%x\n", els_command, 9500 remote_ID, ulp_command, iotag); 9501 9502 spin_lock_irq(&phba->hbalock); 9503 list_del_init(&piocb->dlist); 9504 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9505 spin_unlock_irq(&phba->hbalock); 9506 } 9507 9508 /* Make sure HBA is alive */ 9509 lpfc_issue_hb_tmo(phba); 9510 9511 if (!list_empty(&pring->txcmplq)) 9512 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9513 mod_timer(&vport->els_tmofunc, 9514 jiffies + msecs_to_jiffies(1000 * timeout)); 9515 } 9516 9517 /** 9518 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9519 * @vport: pointer to a host virtual N_Port data structure. 9520 * 9521 * This routine is used to clean up all the outstanding ELS commands on a 9522 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9523 * routine. After that, it walks the ELS transmit queue to remove all the 9524 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9525 * the IOCBs with a non-NULL completion callback function, the callback 9526 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9527 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9528 * callback function, the IOCB will simply be released. Finally, it walks 9529 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9530 * completion queue IOCB that is associated with the @vport and is not 9531 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9532 * part of the discovery state machine) out to HBA by invoking the 9533 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9534 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9535 * the IOCBs are aborted when this function returns. 9536 **/ 9537 void 9538 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9539 { 9540 LIST_HEAD(abort_list); 9541 LIST_HEAD(cancel_list); 9542 struct lpfc_hba *phba = vport->phba; 9543 struct lpfc_sli_ring *pring; 9544 struct lpfc_iocbq *tmp_iocb, *piocb; 9545 u32 ulp_command; 9546 unsigned long iflags = 0; 9547 bool mbx_tmo_err; 9548 9549 lpfc_fabric_abort_vport(vport); 9550 9551 /* 9552 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9553 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9554 * ultimately grabs the ring_lock, the driver must splice the list into 9555 * a working list and release the locks before calling the abort. 9556 */ 9557 spin_lock_irqsave(&phba->hbalock, iflags); 9558 pring = lpfc_phba_elsring(phba); 9559 9560 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9561 if (unlikely(!pring)) { 9562 spin_unlock_irqrestore(&phba->hbalock, iflags); 9563 return; 9564 } 9565 9566 if (phba->sli_rev == LPFC_SLI_REV4) 9567 spin_lock(&pring->ring_lock); 9568 9569 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); 9570 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9571 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9572 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9573 "2243 iotag = 0x%x cmd_flag = 0x%x " 9574 "ulp_command = 0x%x this_vport %x " 9575 "sli_flag = 0x%x\n", 9576 piocb->iotag, piocb->cmd_flag, 9577 get_job_cmnd(phba, piocb), 9578 (piocb->vport == vport), 9579 phba->sli.sli_flag); 9580 9581 if (piocb->vport != vport) 9582 continue; 9583 9584 if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) { 9585 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9586 continue; 9587 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9588 continue; 9589 } 9590 9591 /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, 9592 * or GEN_REQUESTs waiting for a CQE response. 9593 */ 9594 ulp_command = get_job_cmnd(phba, piocb); 9595 if (ulp_command == CMD_ELS_REQUEST64_WQE || 9596 ulp_command == CMD_XMIT_ELS_RSP64_WQE) { 9597 list_add_tail(&piocb->dlist, &abort_list); 9598 9599 /* If the link is down when flushing ELS commands 9600 * the firmware will not complete them till after 9601 * the link comes back up. This may confuse 9602 * discovery for the new link up, so we need to 9603 * change the compl routine to just clean up the iocb 9604 * and avoid any retry logic. 
9605 */ 9606 if (phba->link_state == LPFC_LINK_DOWN) 9607 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9608 } else if (ulp_command == CMD_GEN_REQUEST64_CR || 9609 mbx_tmo_err) 9610 list_add_tail(&piocb->dlist, &abort_list); 9611 } 9612 9613 if (phba->sli_rev == LPFC_SLI_REV4) 9614 spin_unlock(&pring->ring_lock); 9615 spin_unlock_irqrestore(&phba->hbalock, iflags); 9616 9617 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */ 9618 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9619 spin_lock_irqsave(&phba->hbalock, iflags); 9620 list_del_init(&piocb->dlist); 9621 if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) 9622 list_move_tail(&piocb->list, &cancel_list); 9623 else 9624 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9625 9626 spin_unlock_irqrestore(&phba->hbalock, iflags); 9627 } 9628 if (!list_empty(&cancel_list)) 9629 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 9630 IOERR_SLI_ABORTED); 9631 else 9632 /* Make sure HBA is alive */ 9633 lpfc_issue_hb_tmo(phba); 9634 9635 if (!list_empty(&abort_list)) 9636 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9637 "3387 abort list for txq not empty\n"); 9638 INIT_LIST_HEAD(&abort_list); 9639 9640 spin_lock_irqsave(&phba->hbalock, iflags); 9641 if (phba->sli_rev == LPFC_SLI_REV4) 9642 spin_lock(&pring->ring_lock); 9643 9644 /* No need to abort the txq list, 9645 * just queue them up for lpfc_sli_cancel_iocbs 9646 */ 9647 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9648 ulp_command = get_job_cmnd(phba, piocb); 9649 9650 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9651 continue; 9652 9653 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9654 if (ulp_command == CMD_QUE_RING_BUF_CN || 9655 ulp_command == CMD_QUE_RING_BUF64_CN || 9656 ulp_command == CMD_CLOSE_XRI_CN || 9657 ulp_command == CMD_ABORT_XRI_CN || 9658 ulp_command == CMD_ABORT_XRI_CX) 9659 continue; 9660 9661 if (piocb->vport != vport) 9662 continue; 9663 9664 list_del_init(&piocb->list); 9665 list_add_tail(&piocb->list, &abort_list); 9666 } 9667 9668 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9669 if (vport == phba->pport) { 9670 list_for_each_entry_safe(piocb, tmp_iocb, 9671 &phba->fabric_iocb_list, list) { 9672 list_del_init(&piocb->list); 9673 list_add_tail(&piocb->list, &abort_list); 9674 } 9675 } 9676 9677 if (phba->sli_rev == LPFC_SLI_REV4) 9678 spin_unlock(&pring->ring_lock); 9679 spin_unlock_irqrestore(&phba->hbalock, iflags); 9680 9681 /* Cancel all the IOCBs from the completions list */ 9682 lpfc_sli_cancel_iocbs(phba, &abort_list, 9683 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9684 9685 return; 9686 } 9687 9688 /** 9689 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9690 * @phba: pointer to lpfc hba data structure. 9691 * 9692 * This routine is used to clean up all the outstanding ELS commands on a 9693 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9694 * routine. After that, it walks the ELS transmit queue to remove all the 9695 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9696 * the IOCBs with the completion callback function associated, the callback 9697 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9698 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9699 * callback function associated, the IOCB will simply be released. 
Finally, 9700 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9701 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9702 * management plane IOCBs that are not part of the discovery state machine) 9703 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9704 **/ 9705 void 9706 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9707 { 9708 struct lpfc_vport *vport; 9709 9710 spin_lock_irq(&phba->port_list_lock); 9711 list_for_each_entry(vport, &phba->port_list, listentry) 9712 lpfc_els_flush_cmd(vport); 9713 spin_unlock_irq(&phba->port_list_lock); 9714 9715 return; 9716 } 9717 9718 /** 9719 * lpfc_send_els_failure_event - Posts an ELS command failure event 9720 * @phba: Pointer to hba context object. 9721 * @cmdiocbp: Pointer to command iocb which reported error. 9722 * @rspiocbp: Pointer to response iocb which reported error. 9723 * 9724 * This function sends an event when there is an ELS command 9725 * failure. 9726 **/ 9727 void 9728 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9729 struct lpfc_iocbq *cmdiocbp, 9730 struct lpfc_iocbq *rspiocbp) 9731 { 9732 struct lpfc_vport *vport = cmdiocbp->vport; 9733 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9734 struct lpfc_lsrjt_event lsrjt_event; 9735 struct lpfc_fabric_event_header fabric_event; 9736 struct ls_rjt stat; 9737 struct lpfc_nodelist *ndlp; 9738 uint32_t *pcmd; 9739 u32 ulp_status, ulp_word4; 9740 9741 ndlp = cmdiocbp->ndlp; 9742 if (!ndlp) 9743 return; 9744 9745 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9746 ulp_word4 = get_job_word4(phba, rspiocbp); 9747 9748 if (ulp_status == IOSTAT_LS_RJT) { 9749 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9750 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9751 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9752 sizeof(struct lpfc_name)); 9753 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9754 sizeof(struct lpfc_name)); 9755 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9756 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9757 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9758 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9759 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9760 fc_host_post_vendor_event(shost, 9761 fc_get_event_number(), 9762 sizeof(lsrjt_event), 9763 (char *)&lsrjt_event, 9764 LPFC_NL_VENDOR_ID); 9765 return; 9766 } 9767 if (ulp_status == IOSTAT_NPORT_BSY || 9768 ulp_status == IOSTAT_FABRIC_BSY) { 9769 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9770 if (ulp_status == IOSTAT_NPORT_BSY) 9771 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9772 else 9773 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9774 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9775 sizeof(struct lpfc_name)); 9776 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9777 sizeof(struct lpfc_name)); 9778 fc_host_post_vendor_event(shost, 9779 fc_get_event_number(), 9780 sizeof(fabric_event), 9781 (char *)&fabric_event, 9782 LPFC_NL_VENDOR_ID); 9783 return; 9784 } 9785 9786 } 9787 9788 /** 9789 * lpfc_send_els_event - Posts unsolicited els event 9790 * @vport: Pointer to vport object. 9791 * @ndlp: Pointer FC node object. 9792 * @payload: ELS command code type. 9793 * 9794 * This function posts an event when there is an incoming 9795 * unsolicited ELS command. 
9796 **/ 9797 static void 9798 lpfc_send_els_event(struct lpfc_vport *vport, 9799 struct lpfc_nodelist *ndlp, 9800 uint32_t *payload) 9801 { 9802 struct lpfc_els_event_header *els_data = NULL; 9803 struct lpfc_logo_event *logo_data = NULL; 9804 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9805 9806 if (*payload == ELS_CMD_LOGO) { 9807 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9808 if (!logo_data) { 9809 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9810 "0148 Failed to allocate memory " 9811 "for LOGO event\n"); 9812 return; 9813 } 9814 els_data = &logo_data->header; 9815 } else { 9816 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9817 GFP_KERNEL); 9818 if (!els_data) { 9819 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9820 "0149 Failed to allocate memory " 9821 "for ELS event\n"); 9822 return; 9823 } 9824 } 9825 els_data->event_type = FC_REG_ELS_EVENT; 9826 switch (*payload) { 9827 case ELS_CMD_PLOGI: 9828 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9829 break; 9830 case ELS_CMD_PRLO: 9831 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9832 break; 9833 case ELS_CMD_ADISC: 9834 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9835 break; 9836 case ELS_CMD_LOGO: 9837 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9838 /* Copy the WWPN in the LOGO payload */ 9839 memcpy(logo_data->logo_wwpn, &payload[2], 9840 sizeof(struct lpfc_name)); 9841 break; 9842 default: 9843 kfree(els_data); 9844 return; 9845 } 9846 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9847 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9848 if (*payload == ELS_CMD_LOGO) { 9849 fc_host_post_vendor_event(shost, 9850 fc_get_event_number(), 9851 sizeof(struct lpfc_logo_event), 9852 (char *)logo_data, 9853 LPFC_NL_VENDOR_ID); 9854 kfree(logo_data); 9855 } else { 9856 fc_host_post_vendor_event(shost, 9857 fc_get_event_number(), 9858 sizeof(struct lpfc_els_event_header), 9859 (char *)els_data, 9860 LPFC_NL_VENDOR_ID); 9861 kfree(els_data); 9862 } 9863 9864 return; 9865 } 9866 9867 9868 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9869 FC_FPIN_LI_EVT_TYPES_INIT); 9870 9871 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9872 FC_FPIN_DELI_EVT_TYPES_INIT); 9873 9874 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9875 FC_FPIN_CONGN_EVT_TYPES_INIT); 9876 9877 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9878 fc_fpin_congn_severity_types, 9879 FC_FPIN_CONGN_SEVERITY_INIT); 9880 9881 9882 /** 9883 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9884 * @phba: Pointer to phba object. 9885 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9886 * @cnt: count of WWPNs in FPIN payload 9887 * 9888 * This routine is called by LI and PC descriptors. 
9889 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9890 */ 9891 static void 9892 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9893 { 9894 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9895 __be64 wwn; 9896 u64 wwpn; 9897 int i, len; 9898 int line = 0; 9899 int wcnt = 0; 9900 bool endit = false; 9901 9902 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9903 for (i = 0; i < cnt; i++) { 9904 /* Are we on the last WWPN */ 9905 if (i == (cnt - 1)) 9906 endit = true; 9907 9908 /* Extract the next WWPN from the payload */ 9909 wwn = *wwnlist++; 9910 wwpn = be64_to_cpu(wwn); 9911 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9912 " %016llx", wwpn); 9913 9914 /* Log a message if we are on the last WWPN 9915 * or if we hit the max allowed per message. 9916 */ 9917 wcnt++; 9918 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9919 buf[len] = 0; 9920 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9921 "4686 %s\n", buf); 9922 9923 /* Check if we reached the last WWPN */ 9924 if (endit) 9925 return; 9926 9927 /* Limit the number of log message displayed per FPIN */ 9928 line++; 9929 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9930 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9931 "4687 %d WWPNs Truncated\n", 9932 cnt - i - 1); 9933 return; 9934 } 9935 9936 /* Start over with next log message */ 9937 wcnt = 0; 9938 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9939 "Additional WWPNs:"); 9940 } 9941 } 9942 } 9943 9944 /** 9945 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9946 * @phba: Pointer to phba object. 9947 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9948 * 9949 * This function processes a Link Integrity FPIN event by logging a message. 9950 **/ 9951 static void 9952 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9953 { 9954 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9955 const char *li_evt_str; 9956 u32 li_evt, cnt; 9957 9958 li_evt = be16_to_cpu(li->event_type); 9959 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9960 cnt = be32_to_cpu(li->pname_count); 9961 9962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9963 "4680 FPIN Link Integrity %s (x%x) " 9964 "Detecting PN x%016llx Attached PN x%016llx " 9965 "Duration %d mSecs Count %d Port Cnt %d\n", 9966 li_evt_str, li_evt, 9967 be64_to_cpu(li->detecting_wwpn), 9968 be64_to_cpu(li->attached_wwpn), 9969 be32_to_cpu(li->event_threshold), 9970 be32_to_cpu(li->event_count), cnt); 9971 9972 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9973 } 9974 9975 /** 9976 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9977 * @phba: Pointer to hba object. 9978 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9979 * 9980 * This function processes a Delivery FPIN event by logging a message. 
9981 **/ 9982 static void 9983 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9984 { 9985 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9986 const char *del_rsn_str; 9987 u32 del_rsn; 9988 __be32 *frame; 9989 9990 del_rsn = be16_to_cpu(del->deli_reason_code); 9991 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9992 9993 /* Skip over desc_tag/desc_len header to payload */ 9994 frame = (__be32 *)(del + 1); 9995 9996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9997 "4681 FPIN Delivery %s (x%x) " 9998 "Detecting PN x%016llx Attached PN x%016llx " 9999 "DiscHdr0 x%08x " 10000 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10001 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10002 del_rsn_str, del_rsn, 10003 be64_to_cpu(del->detecting_wwpn), 10004 be64_to_cpu(del->attached_wwpn), 10005 be32_to_cpu(frame[0]), 10006 be32_to_cpu(frame[1]), 10007 be32_to_cpu(frame[2]), 10008 be32_to_cpu(frame[3]), 10009 be32_to_cpu(frame[4]), 10010 be32_to_cpu(frame[5])); 10011 } 10012 10013 /** 10014 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10015 * @phba: Pointer to hba object. 10016 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10017 * 10018 * This function processes a Peer Congestion FPIN event by logging a message. 10019 **/ 10020 static void 10021 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10022 { 10023 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10024 const char *pc_evt_str; 10025 u32 pc_evt, cnt; 10026 10027 pc_evt = be16_to_cpu(pc->event_type); 10028 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10029 cnt = be32_to_cpu(pc->pname_count); 10030 10031 /* Capture FPIN frequency */ 10032 phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period); 10033 10034 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10035 "4684 FPIN Peer Congestion %s (x%x) " 10036 "Duration %d mSecs " 10037 "Detecting PN x%016llx Attached PN x%016llx " 10038 "Impacted Port Cnt %d\n", 10039 pc_evt_str, pc_evt, 10040 be32_to_cpu(pc->event_period), 10041 be64_to_cpu(pc->detecting_wwpn), 10042 be64_to_cpu(pc->attached_wwpn), 10043 cnt); 10044 10045 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10046 } 10047 10048 /** 10049 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10050 * @phba: Pointer to hba object. 10051 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10052 * 10053 * This function processes an FPIN Congestion Notifiction. The notification 10054 * could be an Alarm or Warning. This routine feeds that data into driver's 10055 * running congestion algorithm. It also processes the FPIN by 10056 * logging a message. It returns 1 to indicate deliver this message 10057 * to the upper layer or 0 to indicate don't deliver it. 10058 **/ 10059 static int 10060 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10061 { 10062 struct lpfc_cgn_info *cp; 10063 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10064 const char *cgn_evt_str; 10065 u32 cgn_evt; 10066 const char *cgn_sev_str; 10067 u32 cgn_sev; 10068 uint16_t value; 10069 u32 crc; 10070 bool nm_log = false; 10071 int rc = 1; 10072 10073 cgn_evt = be16_to_cpu(cgn->event_type); 10074 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10075 cgn_sev = cgn->severity; 10076 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10077 10078 /* The driver only takes action on a Credit Stall or Oversubscription 10079 * event type to engage the IO algorithm. 
The driver prints an 10080 * unmaskable message only for Lost Credit and Credit Stall. 10081 * TODO: Still need to have definition of host action on clear, 10082 * lost credit and device specific event types. 10083 */ 10084 switch (cgn_evt) { 10085 case FPIN_CONGN_LOST_CREDIT: 10086 nm_log = true; 10087 break; 10088 case FPIN_CONGN_CREDIT_STALL: 10089 nm_log = true; 10090 fallthrough; 10091 case FPIN_CONGN_OVERSUBSCRIPTION: 10092 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10093 nm_log = false; 10094 switch (cgn_sev) { 10095 case FPIN_CONGN_SEVERITY_ERROR: 10096 /* Take action here for an Alarm event */ 10097 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10098 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10099 /* Track of alarm cnt for SYNC_WQE */ 10100 atomic_inc(&phba->cgn_sync_alarm_cnt); 10101 } 10102 /* Track alarm cnt for cgn_info regardless 10103 * of whether CMF is configured for Signals 10104 * or FPINs. 10105 */ 10106 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10107 goto cleanup; 10108 } 10109 break; 10110 case FPIN_CONGN_SEVERITY_WARNING: 10111 /* Take action here for a Warning event */ 10112 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10113 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10114 /* Track of warning cnt for SYNC_WQE */ 10115 atomic_inc(&phba->cgn_sync_warn_cnt); 10116 } 10117 /* Track warning cnt and freq for cgn_info 10118 * regardless of whether CMF is configured for 10119 * Signals or FPINs. 10120 */ 10121 atomic_inc(&phba->cgn_fabric_warn_cnt); 10122 cleanup: 10123 /* Save frequency in ms */ 10124 phba->cgn_fpin_frequency = 10125 be32_to_cpu(cgn->event_period); 10126 value = phba->cgn_fpin_frequency; 10127 if (phba->cgn_i) { 10128 cp = (struct lpfc_cgn_info *) 10129 phba->cgn_i->virt; 10130 cp->cgn_alarm_freq = 10131 cpu_to_le16(value); 10132 cp->cgn_warn_freq = 10133 cpu_to_le16(value); 10134 crc = lpfc_cgn_calc_crc32 10135 (cp, 10136 LPFC_CGN_INFO_SZ, 10137 LPFC_CGN_CRC32_SEED); 10138 cp->cgn_info_crc = cpu_to_le32(crc); 10139 } 10140 10141 /* Don't deliver to upper layer since 10142 * driver took action on this tlv. 10143 */ 10144 rc = 0; 10145 } 10146 break; 10147 } 10148 break; 10149 } 10150 10151 /* Change the log level to unmaskable for the following event types. */ 10152 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10153 LOG_CGN_MGMT | LOG_ELS, 10154 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10155 "Duration %d mSecs\n", 10156 cgn_sev_str, cgn_evt_str, cgn_evt, 10157 be32_to_cpu(cgn->event_period)); 10158 return rc; 10159 } 10160 10161 void 10162 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10163 { 10164 struct lpfc_hba *phba = vport->phba; 10165 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10166 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10167 const char *dtag_nm; 10168 int desc_cnt = 0, bytes_remain, cnt; 10169 u32 dtag, deliver = 0; 10170 int len; 10171 10172 /* FPINs handled only if we are in the right discovery state */ 10173 if (vport->port_state < LPFC_DISC_AUTH) 10174 return; 10175 10176 /* make sure there is the full fpin header */ 10177 if (fpin_length < sizeof(struct fc_els_fpin)) 10178 return; 10179 10180 /* Sanity check descriptor length. The desc_len value does not 10181 * include space for the ELS command and the desc_len fields. 
10182 */ 10183 len = be32_to_cpu(fpin->desc_len); 10184 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10185 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10186 "4671 Bad ELS FPIN length %d: %d\n", 10187 len, fpin_length); 10188 return; 10189 } 10190 10191 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10192 first_tlv = tlv; 10193 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10194 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10195 10196 /* process each descriptor separately */ 10197 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10198 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10199 dtag = be32_to_cpu(tlv->desc_tag); 10200 switch (dtag) { 10201 case ELS_DTAG_LNK_INTEGRITY: 10202 lpfc_els_rcv_fpin_li(phba, tlv); 10203 deliver = 1; 10204 break; 10205 case ELS_DTAG_DELIVERY: 10206 lpfc_els_rcv_fpin_del(phba, tlv); 10207 deliver = 1; 10208 break; 10209 case ELS_DTAG_PEER_CONGEST: 10210 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10211 deliver = 1; 10212 break; 10213 case ELS_DTAG_CONGESTION: 10214 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10215 break; 10216 default: 10217 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10218 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10219 "4678 unknown FPIN descriptor[%d]: " 10220 "tag x%x (%s)\n", 10221 desc_cnt, dtag, dtag_nm); 10222 10223 /* If descriptor is bad, drop the rest of the data */ 10224 return; 10225 } 10226 lpfc_cgn_update_stat(phba, dtag); 10227 cnt = be32_to_cpu(tlv->desc_len); 10228 10229 /* Sanity check descriptor length. The desc_len value does not 10230 * include space for the desc_tag and the desc_len fields. 10231 */ 10232 len -= (cnt + sizeof(struct fc_tlv_desc)); 10233 if (len < 0) { 10234 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10235 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10236 "4672 Bad FPIN descriptor TLV length " 10237 "%d: %d %d %s\n", 10238 cnt, len, fpin_length, dtag_nm); 10239 return; 10240 } 10241 10242 current_tlv = tlv; 10243 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10244 tlv = fc_tlv_next_desc(tlv); 10245 10246 /* Format payload such that the FPIN delivered to the 10247 * upper layer is a single descriptor FPIN. 10248 */ 10249 if (desc_cnt) 10250 memcpy(first_tlv, current_tlv, 10251 (cnt + sizeof(struct fc_els_fpin))); 10252 10253 /* Adjust the length so that it only reflects a 10254 * single descriptor FPIN. 10255 */ 10256 fpin_length = cnt + sizeof(struct fc_els_fpin); 10257 fpin->desc_len = cpu_to_be32(fpin_length); 10258 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10259 10260 /* Send every descriptor individually to the upper layer */ 10261 if (deliver) 10262 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10263 fpin_length, (char *)fpin, 0); 10264 desc_cnt++; 10265 } 10266 } 10267 10268 /** 10269 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10270 * @phba: pointer to lpfc hba data structure. 10271 * @pring: pointer to a SLI ring. 10272 * @vport: pointer to a host virtual N_Port data structure. 10273 * @elsiocb: pointer to lpfc els command iocb data structure. 10274 * 10275 * This routine is used for processing the IOCB associated with a unsolicited 10276 * event. It first determines whether there is an existing ndlp that matches 10277 * the DID from the unsolicited IOCB. If not, it will create a new one with 10278 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10279 * IOCB is then used to invoke the proper routine and to set up proper state 10280 * of the discovery state machine. 10281 **/ 10282 static void 10283 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10284 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10285 { 10286 struct lpfc_nodelist *ndlp; 10287 struct ls_rjt stat; 10288 u32 *payload, payload_len; 10289 u32 cmd = 0, did = 0, newnode, status = 0; 10290 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10291 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10292 LPFC_MBOXQ_t *mbox; 10293 10294 if (!vport || !elsiocb->cmd_dmabuf) 10295 goto dropit; 10296 10297 newnode = 0; 10298 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10299 payload = elsiocb->cmd_dmabuf->virt; 10300 if (phba->sli_rev == LPFC_SLI_REV4) 10301 payload_len = wcqe_cmpl->total_data_placed; 10302 else 10303 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10304 status = get_job_ulpstatus(phba, elsiocb); 10305 cmd = *payload; 10306 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10307 lpfc_sli3_post_buffer(phba, pring, 1); 10308 10309 did = get_job_els_rsp64_did(phba, elsiocb); 10310 if (status) { 10311 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10312 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10313 status, get_job_word4(phba, elsiocb), did); 10314 goto dropit; 10315 } 10316 10317 /* Check to see if link went down during discovery */ 10318 if (lpfc_els_chk_latt(vport)) 10319 goto dropit; 10320 10321 /* Ignore traffic received during vport shutdown. */ 10322 if (test_bit(FC_UNLOADING, &vport->load_flag)) 10323 goto dropit; 10324 10325 /* If NPort discovery is delayed drop incoming ELS */ 10326 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) && 10327 cmd != ELS_CMD_PLOGI) 10328 goto dropit; 10329 10330 ndlp = lpfc_findnode_did(vport, did); 10331 if (!ndlp) { 10332 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10333 ndlp = lpfc_nlp_init(vport, did); 10334 if (!ndlp) 10335 goto dropit; 10336 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10337 newnode = 1; 10338 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10339 ndlp->nlp_type |= NLP_FABRIC; 10340 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10342 newnode = 1; 10343 } 10344 10345 phba->fc_stat.elsRcvFrame++; 10346 10347 /* 10348 * Do not process any unsolicited ELS commands 10349 * if the ndlp is in DEV_LOSS 10350 */ 10351 if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { 10352 if (newnode) 10353 lpfc_nlp_put(ndlp); 10354 goto dropit; 10355 } 10356 10357 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10358 if (!elsiocb->ndlp) 10359 goto dropit; 10360 elsiocb->vport = vport; 10361 10362 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10363 cmd &= ELS_CMD_MASK; 10364 } 10365 /* ELS command <elsCmd> received from NPORT <did> */ 10366 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10367 "0112 ELS command x%x received from NPORT x%x " 10368 "refcnt %d Data: x%x x%lx x%x x%x\n", 10369 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10370 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10371 10372 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10373 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10374 (cmd != ELS_CMD_FLOGI) && 10375 !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) { 10376 rjt_err = LSRJT_LOGICAL_BSY; 10377 rjt_exp = LSEXP_NOTHING_MORE; 10378 goto lsrjt; 10379 } 10380 10381 switch (cmd) { 10382 case ELS_CMD_PLOGI: 10383 
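/* PLOGI: confirm or allocate the remote node, adopt a pt2pt-assigned
 * DID when needed, then let the discovery state machine build the
 * response.
 */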
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10384 "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", 10385 did, vport->port_state, ndlp->nlp_flag); 10386 10387 phba->fc_stat.elsRcvPLOGI++; 10388 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10389 if (phba->sli_rev == LPFC_SLI_REV4 && 10390 test_bit(FC_PT2PT, &phba->pport->fc_flag)) { 10391 vport->fc_prevDID = vport->fc_myDID; 10392 /* Our DID needs to be updated before registering 10393 * the vfi. This is done in lpfc_rcv_plogi but 10394 * that is called after the reg_vfi. 10395 */ 10396 vport->fc_myDID = 10397 bf_get(els_rsp64_sid, 10398 &elsiocb->wqe.xmit_els_rsp); 10399 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10400 "3312 Remote port assigned DID x%x " 10401 "%x\n", vport->fc_myDID, 10402 vport->fc_prevDID); 10403 } 10404 10405 lpfc_send_els_event(vport, ndlp, payload); 10406 10407 /* If Nport discovery is delayed, reject PLOGIs */ 10408 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10409 rjt_err = LSRJT_UNABLE_TPC; 10410 rjt_exp = LSEXP_NOTHING_MORE; 10411 break; 10412 } 10413 10414 if (vport->port_state < LPFC_DISC_AUTH) { 10415 if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) || 10416 test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) { 10417 rjt_err = LSRJT_UNABLE_TPC; 10418 rjt_exp = LSEXP_NOTHING_MORE; 10419 break; 10420 } 10421 } 10422 10423 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10424 NLP_EVT_RCV_PLOGI); 10425 10426 break; 10427 case ELS_CMD_FLOGI: 10428 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10429 "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", 10430 did, vport->port_state, ndlp->nlp_flag); 10431 10432 phba->fc_stat.elsRcvFLOGI++; 10433 10434 /* If the driver believes fabric discovery is done and is ready, 10435 * bounce the link. There is some descrepancy. 10436 */ 10437 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10438 test_bit(FC_PT2PT, &vport->fc_flag) && 10439 vport->rcv_flogi_cnt >= 1) { 10440 rjt_err = LSRJT_LOGICAL_BSY; 10441 rjt_exp = LSEXP_NOTHING_MORE; 10442 init_link++; 10443 goto lsrjt; 10444 } 10445 10446 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10447 /* retain node if our response is deferred */ 10448 if (phba->defer_flogi_acc.flag) 10449 break; 10450 if (newnode) 10451 lpfc_disc_state_machine(vport, ndlp, NULL, 10452 NLP_EVT_DEVICE_RM); 10453 break; 10454 case ELS_CMD_LOGO: 10455 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10456 "RCV LOGO: did:x%x/ste:x%x flg:x%lx", 10457 did, vport->port_state, ndlp->nlp_flag); 10458 10459 phba->fc_stat.elsRcvLOGO++; 10460 lpfc_send_els_event(vport, ndlp, payload); 10461 if (vport->port_state < LPFC_DISC_AUTH) { 10462 rjt_err = LSRJT_UNABLE_TPC; 10463 rjt_exp = LSEXP_NOTHING_MORE; 10464 break; 10465 } 10466 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10467 if (newnode) 10468 lpfc_disc_state_machine(vport, ndlp, NULL, 10469 NLP_EVT_DEVICE_RM); 10470 break; 10471 case ELS_CMD_PRLO: 10472 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10473 "RCV PRLO: did:x%x/ste:x%x flg:x%lx", 10474 did, vport->port_state, ndlp->nlp_flag); 10475 10476 phba->fc_stat.elsRcvPRLO++; 10477 lpfc_send_els_event(vport, ndlp, payload); 10478 if (vport->port_state < LPFC_DISC_AUTH) { 10479 rjt_err = LSRJT_UNABLE_TPC; 10480 rjt_exp = LSEXP_NOTHING_MORE; 10481 break; 10482 } 10483 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10484 break; 10485 case ELS_CMD_LCB: 10486 phba->fc_stat.elsRcvLCB++; 10487 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10488 break; 10489 case ELS_CMD_RDP: 10490 phba->fc_stat.elsRcvRDP++; 10491 
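/* Read Diagnostic Parameters request; the response is built and
 * sent by lpfc_els_rcv_rdp().
 */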
lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10492 break; 10493 case ELS_CMD_RSCN: 10494 phba->fc_stat.elsRcvRSCN++; 10495 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10496 if (newnode) 10497 lpfc_disc_state_machine(vport, ndlp, NULL, 10498 NLP_EVT_DEVICE_RM); 10499 break; 10500 case ELS_CMD_ADISC: 10501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10502 "RCV ADISC: did:x%x/ste:x%x flg:x%lx", 10503 did, vport->port_state, ndlp->nlp_flag); 10504 10505 lpfc_send_els_event(vport, ndlp, payload); 10506 phba->fc_stat.elsRcvADISC++; 10507 if (vport->port_state < LPFC_DISC_AUTH) { 10508 rjt_err = LSRJT_UNABLE_TPC; 10509 rjt_exp = LSEXP_NOTHING_MORE; 10510 break; 10511 } 10512 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10513 NLP_EVT_RCV_ADISC); 10514 break; 10515 case ELS_CMD_PDISC: 10516 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10517 "RCV PDISC: did:x%x/ste:x%x flg:x%lx", 10518 did, vport->port_state, ndlp->nlp_flag); 10519 10520 phba->fc_stat.elsRcvPDISC++; 10521 if (vport->port_state < LPFC_DISC_AUTH) { 10522 rjt_err = LSRJT_UNABLE_TPC; 10523 rjt_exp = LSEXP_NOTHING_MORE; 10524 break; 10525 } 10526 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10527 NLP_EVT_RCV_PDISC); 10528 break; 10529 case ELS_CMD_FARPR: 10530 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10531 "RCV FARPR: did:x%x/ste:x%x flg:x%lx", 10532 did, vport->port_state, ndlp->nlp_flag); 10533 10534 phba->fc_stat.elsRcvFARPR++; 10535 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10536 break; 10537 case ELS_CMD_FARP: 10538 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10539 "RCV FARP: did:x%x/ste:x%x flg:x%lx", 10540 did, vport->port_state, ndlp->nlp_flag); 10541 10542 phba->fc_stat.elsRcvFARP++; 10543 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10544 break; 10545 case ELS_CMD_FAN: 10546 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10547 "RCV FAN: did:x%x/ste:x%x flg:x%lx", 10548 did, vport->port_state, ndlp->nlp_flag); 10549 10550 phba->fc_stat.elsRcvFAN++; 10551 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10552 break; 10553 case ELS_CMD_PRLI: 10554 case ELS_CMD_NVMEPRLI: 10555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10556 "RCV PRLI: did:x%x/ste:x%x flg:x%lx", 10557 did, vport->port_state, ndlp->nlp_flag); 10558 10559 phba->fc_stat.elsRcvPRLI++; 10560 if ((vport->port_state < LPFC_DISC_AUTH) && 10561 test_bit(FC_FABRIC, &vport->fc_flag)) { 10562 rjt_err = LSRJT_UNABLE_TPC; 10563 rjt_exp = LSEXP_NOTHING_MORE; 10564 break; 10565 } 10566 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10567 break; 10568 case ELS_CMD_LIRR: 10569 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10570 "RCV LIRR: did:x%x/ste:x%x flg:x%lx", 10571 did, vport->port_state, ndlp->nlp_flag); 10572 10573 phba->fc_stat.elsRcvLIRR++; 10574 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10575 if (newnode) 10576 lpfc_disc_state_machine(vport, ndlp, NULL, 10577 NLP_EVT_DEVICE_RM); 10578 break; 10579 case ELS_CMD_RLS: 10580 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10581 "RCV RLS: did:x%x/ste:x%x flg:x%lx", 10582 did, vport->port_state, ndlp->nlp_flag); 10583 10584 phba->fc_stat.elsRcvRLS++; 10585 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10586 if (newnode) 10587 lpfc_disc_state_machine(vport, ndlp, NULL, 10588 NLP_EVT_DEVICE_RM); 10589 break; 10590 case ELS_CMD_RPL: 10591 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10592 "RCV RPL: did:x%x/ste:x%x flg:x%lx", 10593 did, vport->port_state, ndlp->nlp_flag); 10594 10595 phba->fc_stat.elsRcvRPL++; 10596 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 
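/* A node created only to service this request is not retained. */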
10597 if (newnode) 10598 lpfc_disc_state_machine(vport, ndlp, NULL, 10599 NLP_EVT_DEVICE_RM); 10600 break; 10601 case ELS_CMD_RNID: 10602 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10603 "RCV RNID: did:x%x/ste:x%x flg:x%lx", 10604 did, vport->port_state, ndlp->nlp_flag); 10605 10606 phba->fc_stat.elsRcvRNID++; 10607 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10608 if (newnode) 10609 lpfc_disc_state_machine(vport, ndlp, NULL, 10610 NLP_EVT_DEVICE_RM); 10611 break; 10612 case ELS_CMD_RTV: 10613 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10614 "RCV RTV: did:x%x/ste:x%x flg:x%lx", 10615 did, vport->port_state, ndlp->nlp_flag); 10616 phba->fc_stat.elsRcvRTV++; 10617 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10618 if (newnode) 10619 lpfc_disc_state_machine(vport, ndlp, NULL, 10620 NLP_EVT_DEVICE_RM); 10621 break; 10622 case ELS_CMD_RRQ: 10623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10624 "RCV RRQ: did:x%x/ste:x%x flg:x%lx", 10625 did, vport->port_state, ndlp->nlp_flag); 10626 10627 phba->fc_stat.elsRcvRRQ++; 10628 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10629 if (newnode) 10630 lpfc_disc_state_machine(vport, ndlp, NULL, 10631 NLP_EVT_DEVICE_RM); 10632 break; 10633 case ELS_CMD_ECHO: 10634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10635 "RCV ECHO: did:x%x/ste:x%x flg:x%lx", 10636 did, vport->port_state, ndlp->nlp_flag); 10637 10638 phba->fc_stat.elsRcvECHO++; 10639 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10640 if (newnode) 10641 lpfc_disc_state_machine(vport, ndlp, NULL, 10642 NLP_EVT_DEVICE_RM); 10643 break; 10644 case ELS_CMD_REC: 10645 /* receive this due to exchange closed */ 10646 rjt_err = LSRJT_UNABLE_TPC; 10647 rjt_exp = LSEXP_INVALID_OX_RX; 10648 break; 10649 case ELS_CMD_FPIN: 10650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10651 "RCV FPIN: did:x%x/ste:x%x " 10652 "flg:x%lx", 10653 did, vport->port_state, ndlp->nlp_flag); 10654 10655 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10656 payload_len); 10657 10658 /* There are no replies, so no rjt codes */ 10659 break; 10660 case ELS_CMD_EDC: 10661 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10662 break; 10663 case ELS_CMD_RDF: 10664 phba->fc_stat.elsRcvRDF++; 10665 /* Accept RDF only from fabric controller */ 10666 if (did != Fabric_Cntl_DID) { 10667 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10668 "1115 Received RDF from invalid DID " 10669 "x%x\n", did); 10670 rjt_err = LSRJT_PROTOCOL_ERR; 10671 rjt_exp = LSEXP_NOTHING_MORE; 10672 goto lsrjt; 10673 } 10674 10675 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10676 break; 10677 default: 10678 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10679 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10680 cmd, did, vport->port_state); 10681 10682 /* Unsupported ELS command, reject */ 10683 rjt_err = LSRJT_CMD_UNSUPPORTED; 10684 rjt_exp = LSEXP_NOTHING_MORE; 10685 10686 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10687 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 10688 "0115 Unknown ELS command x%x " 10689 "received from NPORT x%x\n", cmd, did); 10690 if (newnode) 10691 lpfc_disc_state_machine(vport, ndlp, NULL, 10692 NLP_EVT_DEVICE_RM); 10693 break; 10694 } 10695 10696 lsrjt: 10697 /* check if need to LS_RJT received ELS cmd */ 10698 if (rjt_err) { 10699 memset(&stat, 0, sizeof(stat)); 10700 stat.un.b.lsRjtRsnCode = rjt_err; 10701 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10702 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10703 NULL); 10704 /* Remove the reference from above for new nodes. 
*/ 10705 if (newnode) 10706 lpfc_disc_state_machine(vport, ndlp, NULL, 10707 NLP_EVT_DEVICE_RM); 10708 } 10709 10710 /* Release the reference on this elsiocb, not the ndlp. */ 10711 lpfc_nlp_put(elsiocb->ndlp); 10712 elsiocb->ndlp = NULL; 10713 10714 /* Special case. Driver received an unsolicited command that is 10715 * unsupportable given the driver's current state. Reset the 10716 * link and start over. 10717 */ 10718 if (init_link) { 10719 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10720 if (!mbox) 10721 return; 10722 lpfc_linkdown(phba); 10723 lpfc_init_link(phba, mbox, 10724 phba->cfg_topology, 10725 phba->cfg_link_speed); 10726 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10727 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10728 mbox->vport = vport; 10729 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10730 MBX_NOT_FINISHED) 10731 mempool_free(mbox, phba->mbox_mem_pool); 10732 } 10733 10734 return; 10735 10736 dropit: 10737 if (vport && !test_bit(FC_UNLOADING, &vport->load_flag)) 10738 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10739 "0111 Dropping received ELS cmd " 10740 "Data: x%x x%x x%x x%x\n", 10741 cmd, status, get_job_word4(phba, elsiocb), did); 10742 10743 phba->fc_stat.elsRcvDrop++; 10744 } 10745 10746 /** 10747 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10748 * @phba: pointer to lpfc hba data structure. 10749 * @pring: pointer to a SLI ring. 10750 * @elsiocb: pointer to lpfc els iocb data structure. 10751 * 10752 * This routine is used to process an unsolicited event received from a SLI 10753 * (Service Level Interface) ring. The actual processing of the data buffer 10754 * associated with the unsolicited event is done by invoking the routine 10755 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the 10756 * SLI ring on which the unsolicited event was received.
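 * The routine also resolves the owning vport for SLI-3 NPIV traffic and
 * looks up the receive buffers associated with the event before handing
 * them to lpfc_els_unsol_buffer(), freeing any buffer the handler did not
 * consume.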
10757 **/ 10758 void 10759 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10760 struct lpfc_iocbq *elsiocb) 10761 { 10762 struct lpfc_vport *vport = elsiocb->vport; 10763 u32 ulp_command, status, parameter, bde_count = 0; 10764 IOCB_t *icmd; 10765 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10766 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10767 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10768 dma_addr_t paddr; 10769 10770 elsiocb->cmd_dmabuf = NULL; 10771 elsiocb->rsp_dmabuf = NULL; 10772 elsiocb->bpl_dmabuf = NULL; 10773 10774 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10775 ulp_command = get_job_cmnd(phba, elsiocb); 10776 status = get_job_ulpstatus(phba, elsiocb); 10777 parameter = get_job_word4(phba, elsiocb); 10778 if (phba->sli_rev == LPFC_SLI_REV4) 10779 bde_count = wcqe_cmpl->word3; 10780 else 10781 bde_count = elsiocb->iocb.ulpBdeCount; 10782 10783 if (status == IOSTAT_NEED_BUFFER) { 10784 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10785 } else if (status == IOSTAT_LOCAL_REJECT && 10786 (parameter & IOERR_PARAM_MASK) == 10787 IOERR_RCV_BUFFER_WAITING) { 10788 phba->fc_stat.NoRcvBuf++; 10789 /* Not enough posted buffers; Try posting more buffers */ 10790 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10791 lpfc_sli3_post_buffer(phba, pring, 0); 10792 return; 10793 } 10794 10795 if (phba->sli_rev == LPFC_SLI_REV3) { 10796 icmd = &elsiocb->iocb; 10797 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10798 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10799 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10800 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10801 vport = phba->pport; 10802 else 10803 vport = lpfc_find_vport_by_vpid(phba, 10804 icmd->unsli3.rcvsli3.vpi); 10805 } 10806 } 10807 10808 /* If there are no BDEs associated 10809 * with this IOCB, there is nothing to do. 10810 */ 10811 if (bde_count == 0) 10812 return; 10813 10814 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10815 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10816 elsiocb->cmd_dmabuf = bdeBuf1; 10817 if (bde_count == 2) 10818 elsiocb->bpl_dmabuf = bdeBuf2; 10819 } else { 10820 icmd = &elsiocb->iocb; 10821 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10822 icmd->un.cont64[0].addrLow); 10823 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10824 paddr); 10825 if (bde_count == 2) { 10826 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10827 icmd->un.cont64[1].addrLow); 10828 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10829 pring, 10830 paddr); 10831 } 10832 } 10833 10834 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10835 /* 10836 * The different unsolicited event handlers would tell us 10837 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10838 */ 10839 if (elsiocb->cmd_dmabuf) { 10840 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10841 elsiocb->cmd_dmabuf = NULL; 10842 } 10843 10844 if (elsiocb->bpl_dmabuf) { 10845 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10846 elsiocb->bpl_dmabuf = NULL; 10847 } 10848 10849 } 10850 10851 static void 10852 lpfc_start_fdmi(struct lpfc_vport *vport) 10853 { 10854 struct lpfc_nodelist *ndlp; 10855 10856 /* If this is the first time, allocate an ndlp and initialize 10857 * it. Otherwise, make sure the node is enabled and then do the 10858 * login. 
10859 */ 10860 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10861 if (!ndlp) { 10862 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10863 if (ndlp) { 10864 ndlp->nlp_type |= NLP_FABRIC; 10865 } else { 10866 return; 10867 } 10868 } 10869 10870 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10871 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10872 } 10873 10874 /** 10875 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10876 * @phba: pointer to lpfc hba data structure. 10877 * @vport: pointer to a virtual N_Port data structure. 10878 * 10879 * This routine issues a Port Login (PLOGI) to the Name Server with 10880 * State Change Request (SCR) for a @vport. This routine will create an 10881 * ndlp for the Name Server associated to the @vport if such node does 10882 * not already exist. The PLOGI to Name Server is issued by invoking the 10883 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10884 * (FDMI) is configured to the @vport, a FDMI node will be created and 10885 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10886 **/ 10887 void 10888 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10889 { 10890 struct lpfc_nodelist *ndlp; 10891 10892 /* 10893 * If lpfc_delay_discovery parameter is set and the clean address 10894 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10895 * discovery. 10896 */ 10897 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10898 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10899 "3334 Delay fc port discovery for %d secs\n", 10900 phba->fc_ratov); 10901 mod_timer(&vport->delayed_disc_tmo, 10902 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10903 return; 10904 } 10905 10906 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10907 if (!ndlp) { 10908 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10909 if (!ndlp) { 10910 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10911 lpfc_disc_start(vport); 10912 return; 10913 } 10914 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10915 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10916 "0251 NameServer login: no memory\n"); 10917 return; 10918 } 10919 } 10920 10921 ndlp->nlp_type |= NLP_FABRIC; 10922 10923 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10924 10925 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10926 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10927 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10928 "0252 Cannot issue NameServer login\n"); 10929 return; 10930 } 10931 10932 if ((phba->cfg_enable_SmartSAN || 10933 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) && 10934 test_bit(FC_ALLOW_FDMI, &vport->load_flag)) 10935 lpfc_start_fdmi(vport); 10936 } 10937 10938 /** 10939 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10940 * @phba: pointer to lpfc hba data structure. 10941 * @pmb: pointer to the driver internal queue element for mailbox command. 10942 * 10943 * This routine is the completion callback function to register new vport 10944 * mailbox command. If the new vport mailbox command completes successfully, 10945 * the fabric registration login shall be performed on physical port (the 10946 * new vport created is actually a physical port, with VPI 0) or the port 10947 * login to Name Server for State Change Request (SCR) will be performed 10948 * on virtual port (real virtual port, with VPI greater than 0). 
10949 **/ 10950 static void 10951 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10952 { 10953 struct lpfc_vport *vport = pmb->vport; 10954 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10955 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10956 MAILBOX_t *mb = &pmb->u.mb; 10957 int rc; 10958 10959 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 10960 10961 if (mb->mbxStatus) { 10962 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10963 "0915 Register VPI failed : Status: x%x" 10964 " upd bit: x%x \n", mb->mbxStatus, 10965 mb->un.varRegVpi.upd); 10966 if (phba->sli_rev == LPFC_SLI_REV4 && 10967 mb->un.varRegVpi.upd) 10968 goto mbox_err_exit ; 10969 10970 switch (mb->mbxStatus) { 10971 case 0x11: /* unsupported feature */ 10972 case 0x9603: /* max_vpi exceeded */ 10973 case 0x9602: /* Link event since CLEAR_LA */ 10974 /* giving up on vport registration */ 10975 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10976 clear_bit(FC_FABRIC, &vport->fc_flag); 10977 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 10978 lpfc_can_disctmo(vport); 10979 break; 10980 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10981 case 0x20: 10982 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 10983 lpfc_init_vpi(phba, pmb, vport->vpi); 10984 pmb->vport = vport; 10985 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10986 rc = lpfc_sli_issue_mbox(phba, pmb, 10987 MBX_NOWAIT); 10988 if (rc == MBX_NOT_FINISHED) { 10989 lpfc_printf_vlog(vport, KERN_ERR, 10990 LOG_TRACE_EVENT, 10991 "2732 Failed to issue INIT_VPI" 10992 " mailbox command\n"); 10993 } else { 10994 lpfc_nlp_put(ndlp); 10995 return; 10996 } 10997 fallthrough; 10998 default: 10999 /* Try to recover from this error */ 11000 if (phba->sli_rev == LPFC_SLI_REV4) 11001 lpfc_sli4_unreg_all_rpis(vport); 11002 lpfc_mbx_unreg_vpi(vport); 11003 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11004 if (mb->mbxStatus == MBX_NOT_FINISHED) 11005 break; 11006 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11007 !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) { 11008 if (phba->sli_rev == LPFC_SLI_REV4) 11009 lpfc_issue_init_vfi(vport); 11010 else 11011 lpfc_initial_flogi(vport); 11012 } else { 11013 lpfc_initial_fdisc(vport); 11014 } 11015 break; 11016 } 11017 } else { 11018 spin_lock_irq(shost->host_lock); 11019 vport->vpi_state |= LPFC_VPI_REGISTERED; 11020 spin_unlock_irq(shost->host_lock); 11021 if (vport == phba->pport) { 11022 if (phba->sli_rev < LPFC_SLI_REV4) 11023 lpfc_issue_fabric_reglogin(vport); 11024 else { 11025 /* 11026 * If the physical port is instantiated using 11027 * FDISC, do not start vport discovery. 11028 */ 11029 if (vport->port_state != LPFC_FDISC) 11030 lpfc_start_fdiscs(phba); 11031 lpfc_do_scr_ns_plogi(phba, vport); 11032 } 11033 } else { 11034 lpfc_do_scr_ns_plogi(phba, vport); 11035 } 11036 } 11037 mbox_err_exit: 11038 /* Now, we decrement the ndlp reference count held for this 11039 * callback function 11040 */ 11041 lpfc_nlp_put(ndlp); 11042 11043 mempool_free(pmb, phba->mbox_mem_pool); 11044 11045 /* reinitialize the VMID datastructure before returning. 11046 * this is specifically for vport 11047 */ 11048 if (lpfc_is_vmid_enabled(phba)) 11049 lpfc_reinit_vmid(vport); 11050 vport->vmid_flag = vport->phba->pport->vmid_flag; 11051 11052 return; 11053 } 11054 11055 /** 11056 * lpfc_register_new_vport - Register a new vport with a HBA 11057 * @phba: pointer to lpfc hba data structure. 11058 * @vport: pointer to a host virtual N_Port data structure. 11059 * @ndlp: pointer to a node-list data structure. 
11060 * 11061 * This routine registers the @vport as a new virtual port with a HBA. 11062 * It is done through a registering vpi mailbox command. 11063 **/ 11064 void 11065 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11066 struct lpfc_nodelist *ndlp) 11067 { 11068 LPFC_MBOXQ_t *mbox; 11069 11070 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11071 if (mbox) { 11072 lpfc_reg_vpi(vport, mbox); 11073 mbox->vport = vport; 11074 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11075 if (!mbox->ctx_ndlp) { 11076 mempool_free(mbox, phba->mbox_mem_pool); 11077 goto mbox_err_exit; 11078 } 11079 11080 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11081 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11082 == MBX_NOT_FINISHED) { 11083 /* mailbox command not success, decrement ndlp 11084 * reference count for this command 11085 */ 11086 lpfc_nlp_put(ndlp); 11087 mempool_free(mbox, phba->mbox_mem_pool); 11088 11089 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11090 "0253 Register VPI: Can't send mbox\n"); 11091 goto mbox_err_exit; 11092 } 11093 } else { 11094 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11095 "0254 Register VPI: no memory\n"); 11096 goto mbox_err_exit; 11097 } 11098 return; 11099 11100 mbox_err_exit: 11101 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11102 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11103 return; 11104 } 11105 11106 /** 11107 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11108 * @phba: pointer to lpfc hba data structure. 11109 * 11110 * This routine cancels the retry delay timers to all the vports. 11111 **/ 11112 void 11113 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11114 { 11115 struct lpfc_vport **vports; 11116 struct lpfc_nodelist *ndlp; 11117 uint32_t link_state; 11118 int i; 11119 11120 /* Treat this failure as linkdown for all vports */ 11121 link_state = phba->link_state; 11122 lpfc_linkdown(phba); 11123 phba->link_state = link_state; 11124 11125 vports = lpfc_create_vport_work_array(phba); 11126 11127 if (vports) { 11128 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11129 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11130 if (ndlp) 11131 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11132 lpfc_els_flush_cmd(vports[i]); 11133 } 11134 lpfc_destroy_vport_work_array(phba, vports); 11135 } 11136 } 11137 11138 /** 11139 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11140 * @phba: pointer to lpfc hba data structure. 11141 * 11142 * This routine abort all pending discovery commands and 11143 * start a timer to retry FLOGI for the physical port 11144 * discovery. 11145 **/ 11146 void 11147 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11148 { 11149 struct lpfc_nodelist *ndlp; 11150 11151 /* Cancel the all vports retry delay retry timers */ 11152 lpfc_cancel_all_vport_retry_delay_timer(phba); 11153 11154 /* If fabric require FLOGI, then re-instantiate physical login */ 11155 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11156 if (!ndlp) 11157 return; 11158 11159 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11160 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); 11161 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11162 phba->pport->port_state = LPFC_FLOGI; 11163 return; 11164 } 11165 11166 /** 11167 * lpfc_fabric_login_reqd - Check if FLOGI required. 11168 * @phba: pointer to lpfc hba data structure. 11169 * @cmdiocb: pointer to FDISC command iocb. 11170 * @rspiocb: pointer to FDISC response iocb. 
11171 * 11172 * This routine checks if a FLOGI is required for FDISC 11173 * to succeed. 11174 **/ 11175 static int 11176 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 11177 struct lpfc_iocbq *cmdiocb, 11178 struct lpfc_iocbq *rspiocb) 11179 { 11180 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11181 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11182 11183 if (ulp_status != IOSTAT_FABRIC_RJT || 11184 ulp_word4 != RJT_LOGIN_REQUIRED) 11185 return 0; 11186 else 11187 return 1; 11188 } 11189 11190 /** 11191 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11192 * @phba: pointer to lpfc hba data structure. 11193 * @cmdiocb: pointer to lpfc command iocb data structure. 11194 * @rspiocb: pointer to lpfc response iocb data structure. 11195 * 11196 * This routine is the completion callback function for a Fabric Discover 11197 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11198 * single threaded, each FDISC completion callback function will reset 11199 * the discovery timer for all vports so that the timers do not expire 11200 * unnecessarily. The function checks the FDISC IOCB status. If an error is 11201 * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the 11202 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID 11203 * assigned to the vport has been changed with the completion of the FDISC 11204 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11205 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11206 * routine is invoked to register the new vport with the HBA. Otherwise, the 11207 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11208 * Server for State Change Request (SCR). 11209 **/ 11210 static void 11211 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11212 struct lpfc_iocbq *rspiocb) 11213 { 11214 struct lpfc_vport *vport = cmdiocb->vport; 11215 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11216 struct lpfc_nodelist *np; 11217 struct lpfc_nodelist *next_np; 11218 struct lpfc_iocbq *piocb; 11219 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11220 struct serv_parm *sp; 11221 uint8_t fabric_param_changed; 11222 u32 ulp_status, ulp_word4; 11223 11224 ulp_status = get_job_ulpstatus(phba, rspiocb); 11225 ulp_word4 = get_job_word4(phba, rspiocb); 11226 11227 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11228 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 11229 ulp_status, ulp_word4, 11230 vport->fc_prevDID); 11231 /* Since all FDISCs are being single threaded, we 11232 * must reset the discovery timer for ALL vports 11233 * waiting to send FDISC when one completes.
11234 */ 11235 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11236 lpfc_set_disctmo(piocb->vport); 11237 } 11238 11239 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11240 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11241 ulp_status, ulp_word4, vport->fc_prevDID); 11242 11243 if (ulp_status) { 11244 11245 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11246 lpfc_retry_pport_discovery(phba); 11247 goto out; 11248 } 11249 11250 /* Check for retry */ 11251 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11252 goto out; 11253 /* Warn FDISC status */ 11254 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 11255 "0126 FDISC cmpl status: x%x/x%x)\n", 11256 ulp_status, ulp_word4); 11257 goto fdisc_failed; 11258 } 11259 11260 lpfc_check_nlp_post_devloss(vport, ndlp); 11261 11262 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag); 11263 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag); 11264 set_bit(FC_FABRIC, &vport->fc_flag); 11265 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11266 set_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 11267 11268 vport->fc_myDID = ulp_word4 & Mask_DID; 11269 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11270 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11271 if (!prsp) 11272 goto out; 11273 if (!lpfc_is_els_acc_rsp(prsp)) 11274 goto out; 11275 11276 sp = prsp->virt + sizeof(uint32_t); 11277 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11278 memcpy(&vport->fabric_portname, &sp->portName, 11279 sizeof(struct lpfc_name)); 11280 memcpy(&vport->fabric_nodename, &sp->nodeName, 11281 sizeof(struct lpfc_name)); 11282 if (fabric_param_changed && 11283 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11284 /* If our NportID changed, we need to ensure all 11285 * remaining NPORTs get unreg_login'ed so we can 11286 * issue unreg_vpi. 11287 */ 11288 list_for_each_entry_safe(np, next_np, 11289 &vport->fc_nodes, nlp_listp) { 11290 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11291 !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) 11292 continue; 11293 clear_bit(NLP_NPR_ADISC, &np->nlp_flag); 11294 lpfc_unreg_rpi(vport, np); 11295 } 11296 lpfc_cleanup_pending_mbox(vport); 11297 11298 if (phba->sli_rev == LPFC_SLI_REV4) 11299 lpfc_sli4_unreg_all_rpis(vport); 11300 11301 lpfc_mbx_unreg_vpi(vport); 11302 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11303 if (phba->sli_rev == LPFC_SLI_REV4) 11304 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 11305 else 11306 set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag); 11307 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11308 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11309 /* 11310 * Driver needs to re-reg VPI in order for f/w 11311 * to update the MAC address. 11312 */ 11313 lpfc_register_new_vport(phba, vport, ndlp); 11314 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11315 goto out; 11316 } 11317 11318 if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 11319 lpfc_issue_init_vpi(vport); 11320 else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) 11321 lpfc_register_new_vport(phba, vport, ndlp); 11322 else 11323 lpfc_do_scr_ns_plogi(phba, vport); 11324 11325 /* The FDISC completed successfully. Move the fabric ndlp to 11326 * UNMAPPED state and register with the transport. 
11327 */ 11328 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11329 goto out; 11330 11331 fdisc_failed: 11332 if (vport->fc_vport && 11333 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11334 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11335 /* Cancel discovery timer */ 11336 lpfc_can_disctmo(vport); 11337 out: 11338 lpfc_els_free_iocb(phba, cmdiocb); 11339 lpfc_nlp_put(ndlp); 11340 } 11341 11342 /** 11343 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11344 * @vport: pointer to a virtual N_Port data structure. 11345 * @ndlp: pointer to a node-list data structure. 11346 * @retry: number of retries to the command IOCB. 11347 * 11348 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11349 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11350 * routine to issue the IOCB, which makes sure only one outstanding fabric 11351 * IOCB will be sent off HBA at any given time. 11352 * 11353 * Note that the ndlp reference count will be incremented by 1 for holding the 11354 * ndlp and the reference to ndlp will be stored into the ndlp field of 11355 * the IOCB for the completion callback function to the FDISC ELS command. 11356 * 11357 * Return code 11358 * 0 - Successfully issued fdisc iocb command 11359 * 1 - Failed to issue fdisc iocb command 11360 **/ 11361 static int 11362 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11363 uint8_t retry) 11364 { 11365 struct lpfc_hba *phba = vport->phba; 11366 IOCB_t *icmd; 11367 union lpfc_wqe128 *wqe = NULL; 11368 struct lpfc_iocbq *elsiocb; 11369 struct serv_parm *sp; 11370 uint8_t *pcmd; 11371 uint16_t cmdsize; 11372 int did = ndlp->nlp_DID; 11373 int rc; 11374 11375 vport->port_state = LPFC_FDISC; 11376 vport->fc_myDID = 0; 11377 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11378 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11379 ELS_CMD_FDISC); 11380 if (!elsiocb) { 11381 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11382 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11383 "0255 Issue FDISC: no IOCB\n"); 11384 return 1; 11385 } 11386 11387 if (phba->sli_rev == LPFC_SLI_REV4) { 11388 wqe = &elsiocb->wqe; 11389 bf_set(els_req64_sid, &wqe->els_req, 0); 11390 bf_set(els_req64_sp, &wqe->els_req, 1); 11391 } else { 11392 icmd = &elsiocb->iocb; 11393 icmd->un.elsreq64.myID = 0; 11394 icmd->un.elsreq64.fl = 1; 11395 icmd->ulpCt_h = 1; 11396 icmd->ulpCt_l = 0; 11397 } 11398 11399 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11400 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11401 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11402 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11403 sp = (struct serv_parm *) pcmd; 11404 /* Setup CSPs accordingly for Fabric */ 11405 sp->cmn.e_d_tov = 0; 11406 sp->cmn.w2.r_a_tov = 0; 11407 sp->cmn.virtual_fabric_support = 0; 11408 sp->cls1.classValid = 0; 11409 sp->cls2.seqDelivery = 1; 11410 sp->cls3.seqDelivery = 1; 11411 11412 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11413 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11414 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11415 pcmd += sizeof(uint32_t); /* Port Name */ 11416 memcpy(pcmd, &vport->fc_portname, 8); 11417 pcmd += sizeof(uint32_t); /* Node Name */ 11418 pcmd += sizeof(uint32_t); /* Node Name */ 11419 memcpy(pcmd, &vport->fc_nodename, 8); 11420 sp->cmn.valid_vendor_ver_level = 0; 11421 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11422 lpfc_set_disctmo(vport); 11423 11424 phba->fc_stat.elsXmitFDISC++; 
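/* Completion is handled by lpfc_cmpl_els_fdisc(); issuing through
 * lpfc_issue_fabric_iocb() below keeps at most one fabric ELS
 * outstanding at a time.
 */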
11425 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11426 11427 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11428 "Issue FDISC: did:x%x", 11429 did, 0, 0); 11430 11431 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11432 if (!elsiocb->ndlp) 11433 goto err_out; 11434 11435 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11436 if (rc == IOCB_ERROR) { 11437 lpfc_nlp_put(ndlp); 11438 goto err_out; 11439 } 11440 11441 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11442 return 0; 11443 11444 err_out: 11445 lpfc_els_free_iocb(phba, elsiocb); 11446 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11447 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11448 "0256 Issue FDISC: Cannot send IOCB\n"); 11449 return 1; 11450 } 11451 11452 /** 11453 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11454 * @phba: pointer to lpfc hba data structure. 11455 * @cmdiocb: pointer to lpfc command iocb data structure. 11456 * @rspiocb: pointer to lpfc response iocb data structure. 11457 * 11458 * This routine is the completion callback function to the issuing of a LOGO 11459 * ELS command off a vport. It frees the command IOCB and then decrement the 11460 * reference count held on ndlp for this completion function, indicating that 11461 * the reference to the ndlp is no long needed. Note that the 11462 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11463 * callback function and an additional explicit ndlp reference decrementation 11464 * will trigger the actual release of the ndlp. 11465 **/ 11466 static void 11467 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11468 struct lpfc_iocbq *rspiocb) 11469 { 11470 struct lpfc_vport *vport = cmdiocb->vport; 11471 IOCB_t *irsp; 11472 struct lpfc_nodelist *ndlp; 11473 u32 ulp_status, ulp_word4, did, tmo; 11474 11475 ndlp = cmdiocb->ndlp; 11476 11477 ulp_status = get_job_ulpstatus(phba, rspiocb); 11478 ulp_word4 = get_job_word4(phba, rspiocb); 11479 11480 if (phba->sli_rev == LPFC_SLI_REV4) { 11481 did = get_job_els_rsp64_did(phba, cmdiocb); 11482 tmo = get_wqe_tmo(cmdiocb); 11483 } else { 11484 irsp = &rspiocb->iocb; 11485 did = get_job_els_rsp64_did(phba, rspiocb); 11486 tmo = irsp->ulpTimeout; 11487 } 11488 11489 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11490 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11491 ulp_status, ulp_word4, did); 11492 11493 /* NPIV LOGO completes to NPort <nlp_DID> */ 11494 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11495 "2928 NPIV LOGO completes to NPort x%x " 11496 "Data: x%x x%x x%x x%x x%x x%lx x%x\n", 11497 ndlp->nlp_DID, ulp_status, ulp_word4, 11498 tmo, vport->num_disc_nodes, 11499 kref_read(&ndlp->kref), ndlp->nlp_flag, 11500 ndlp->fc4_xpt_flags); 11501 11502 if (ulp_status == IOSTAT_SUCCESS) { 11503 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 11504 clear_bit(FC_FABRIC, &vport->fc_flag); 11505 lpfc_can_disctmo(vport); 11506 } 11507 11508 if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) { 11509 /* Wake up lpfc_vport_delete if waiting...*/ 11510 if (ndlp->logo_waitq) 11511 wake_up(ndlp->logo_waitq); 11512 clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); 11513 clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); 11514 clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags); 11515 } 11516 11517 /* Safe to release resources now. */ 11518 lpfc_els_free_iocb(phba, cmdiocb); 11519 lpfc_nlp_put(ndlp); 11520 } 11521 11522 /** 11523 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11524 * @vport: pointer to a virtual N_Port data structure. 
11525 * @ndlp: pointer to a node-list data structure. 11526 * 11527 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11528 * 11529 * Note that the ndlp reference count will be incremented by 1 for holding the 11530 * ndlp and the reference to ndlp will be stored into the ndlp field of 11531 * the IOCB for the completion callback function to the LOGO ELS command. 11532 * 11533 * Return codes 11534 * 0 - Successfully issued logo off the @vport 11535 * 1 - Failed to issue logo off the @vport 11536 **/ 11537 int 11538 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11539 { 11540 int rc = 0; 11541 struct lpfc_hba *phba = vport->phba; 11542 struct lpfc_iocbq *elsiocb; 11543 uint8_t *pcmd; 11544 uint16_t cmdsize; 11545 11546 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11547 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11548 ELS_CMD_LOGO); 11549 if (!elsiocb) 11550 return 1; 11551 11552 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11553 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11554 pcmd += sizeof(uint32_t); 11555 11556 /* Fill in LOGO payload */ 11557 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11558 pcmd += sizeof(uint32_t); 11559 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11560 11561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11562 "Issue LOGO npiv did:x%x flg:x%lx", 11563 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11564 11565 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11566 set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); 11567 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11568 if (!elsiocb->ndlp) { 11569 lpfc_els_free_iocb(phba, elsiocb); 11570 goto err; 11571 } 11572 11573 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11574 if (rc == IOCB_ERROR) { 11575 lpfc_els_free_iocb(phba, elsiocb); 11576 lpfc_nlp_put(ndlp); 11577 goto err; 11578 } 11579 return 0; 11580 11581 err: 11582 clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); 11583 return 1; 11584 } 11585 11586 /** 11587 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11588 * @t: timer context used to obtain the lpfc hba. 11589 * 11590 * This routine is invoked by the fabric iocb block timer after 11591 * timeout. It posts the fabric iocb block timeout event by setting the 11592 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11593 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11594 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11595 * posted event WORKER_FABRIC_BLOCK_TMO. 11596 **/ 11597 void 11598 lpfc_fabric_block_timeout(struct timer_list *t) 11599 { 11600 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11601 unsigned long iflags; 11602 uint32_t tmo_posted; 11603 11604 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11605 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11606 if (!tmo_posted) 11607 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11608 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11609 11610 if (!tmo_posted) 11611 lpfc_worker_wake_up(phba); 11612 return; 11613 } 11614 11615 /** 11616 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11617 * @phba: pointer to lpfc hba data structure. 11618 * 11619 * This routine issues one fabric iocb from the driver internal list to 11620 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11621 * the HBA (whether there is no outstanding fabric iocb). 
If so, it shall 11622 * remove one pending fabric iocb from the driver internal list and invokes 11623 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11624 **/ 11625 static void 11626 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11627 { 11628 struct lpfc_iocbq *iocb; 11629 unsigned long iflags; 11630 int ret; 11631 11632 repeat: 11633 iocb = NULL; 11634 spin_lock_irqsave(&phba->hbalock, iflags); 11635 /* Post any pending iocb to the SLI layer */ 11636 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11637 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11638 list); 11639 if (iocb) 11640 /* Increment fabric iocb count to hold the position */ 11641 atomic_inc(&phba->fabric_iocb_count); 11642 } 11643 spin_unlock_irqrestore(&phba->hbalock, iflags); 11644 if (iocb) { 11645 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11646 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11647 iocb->cmd_flag |= LPFC_IO_FABRIC; 11648 11649 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11650 "Fabric sched1: ste:x%x", 11651 iocb->vport->port_state, 0, 0); 11652 11653 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11654 11655 if (ret == IOCB_ERROR) { 11656 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11657 iocb->fabric_cmd_cmpl = NULL; 11658 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11659 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11660 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11661 iocb->cmd_cmpl(phba, iocb, iocb); 11662 11663 atomic_dec(&phba->fabric_iocb_count); 11664 goto repeat; 11665 } 11666 } 11667 } 11668 11669 /** 11670 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11671 * @phba: pointer to lpfc hba data structure. 11672 * 11673 * This routine unblocks the issuing fabric iocb command. The function 11674 * will clear the fabric iocb block bit and then invoke the routine 11675 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11676 * from the driver internal fabric iocb list. 11677 **/ 11678 void 11679 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11680 { 11681 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11682 11683 lpfc_resume_fabric_iocbs(phba); 11684 return; 11685 } 11686 11687 /** 11688 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11689 * @phba: pointer to lpfc hba data structure. 11690 * 11691 * This routine blocks the issuing fabric iocb for a specified amount of 11692 * time (currently 100 ms). This is done by set the fabric iocb block bit 11693 * and set up a timeout timer for 100ms. When the block bit is set, no more 11694 * fabric iocb will be issued out of the HBA. 11695 **/ 11696 static void 11697 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11698 { 11699 int blocked; 11700 11701 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11702 /* Start a timer to unblock fabric iocbs after 100ms */ 11703 if (!blocked) 11704 mod_timer(&phba->fabric_block_timer, 11705 jiffies + msecs_to_jiffies(100)); 11706 11707 return; 11708 } 11709 11710 /** 11711 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11712 * @phba: pointer to lpfc hba data structure. 11713 * @cmdiocb: pointer to lpfc command iocb data structure. 11714 * @rspiocb: pointer to lpfc response iocb data structure. 11715 * 11716 * This routine is the callback function that is put to the fabric iocb's 11717 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11718 * function pointer has been stored in iocb->fabric_cmd_cmpl. 
This callback 11719 * function first restores and invokes the original iocb's callback function 11720 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11721 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11722 **/ 11723 static void 11724 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11725 struct lpfc_iocbq *rspiocb) 11726 { 11727 struct ls_rjt stat; 11728 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11729 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11730 11731 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11732 11733 switch (ulp_status) { 11734 case IOSTAT_NPORT_RJT: 11735 case IOSTAT_FABRIC_RJT: 11736 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11737 lpfc_block_fabric_iocbs(phba); 11738 break; 11739 11740 case IOSTAT_NPORT_BSY: 11741 case IOSTAT_FABRIC_BSY: 11742 lpfc_block_fabric_iocbs(phba); 11743 break; 11744 11745 case IOSTAT_LS_RJT: 11746 stat.un.ls_rjt_error_be = 11747 cpu_to_be32(ulp_word4); 11748 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11749 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11750 lpfc_block_fabric_iocbs(phba); 11751 break; 11752 } 11753 11754 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11755 11756 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11757 cmdiocb->fabric_cmd_cmpl = NULL; 11758 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11759 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11760 11761 atomic_dec(&phba->fabric_iocb_count); 11762 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11763 /* Post any pending iocbs to HBA */ 11764 lpfc_resume_fabric_iocbs(phba); 11765 } 11766 } 11767 11768 /** 11769 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11770 * @phba: pointer to lpfc hba data structure. 11771 * @iocb: pointer to lpfc command iocb data structure. 11772 * 11773 * This routine is used as the top-level API for issuing a fabric iocb command 11774 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver 11775 * function makes sure that only one fabric bound iocb will be outstanding at 11776 * any given time. As such, this function will first check to see whether there 11777 * is already an outstanding fabric iocb on the wire. If so, it will put the 11778 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11779 * issued later. Otherwise, it will issue the iocb on the wire and update the 11780 * fabric iocb count to indicate that there is one fabric iocb on the wire. 11781 * 11782 * Note, this implementation can potentially send fabric IOCBs out of 11783 * order. The problem is that the construction of the "ready" boolean does 11784 * not include the condition that the internal fabric IOCB list is empty. As 11785 * such, it is possible that a fabric IOCB issued by this routine might "jump" 11786 * ahead of the fabric IOCBs in the internal list.
11787 * 11788 * Return code 11789 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11790 * IOCB_ERROR - failed to issue fabric iocb 11791 **/ 11792 static int 11793 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11794 { 11795 unsigned long iflags; 11796 int ready; 11797 int ret; 11798 11799 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11800 11801 spin_lock_irqsave(&phba->hbalock, iflags); 11802 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11803 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11804 11805 if (ready) 11806 /* Increment fabric iocb count to hold the position */ 11807 atomic_inc(&phba->fabric_iocb_count); 11808 spin_unlock_irqrestore(&phba->hbalock, iflags); 11809 if (ready) { 11810 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11811 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11812 iocb->cmd_flag |= LPFC_IO_FABRIC; 11813 11814 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11815 "Fabric sched2: ste:x%x", 11816 iocb->vport->port_state, 0, 0); 11817 11818 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11819 11820 if (ret == IOCB_ERROR) { 11821 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11822 iocb->fabric_cmd_cmpl = NULL; 11823 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11824 atomic_dec(&phba->fabric_iocb_count); 11825 } 11826 } else { 11827 spin_lock_irqsave(&phba->hbalock, iflags); 11828 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11829 spin_unlock_irqrestore(&phba->hbalock, iflags); 11830 ret = IOCB_SUCCESS; 11831 } 11832 return ret; 11833 } 11834 11835 /** 11836 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11837 * @vport: pointer to a virtual N_Port data structure. 11838 * 11839 * This routine aborts all the IOCBs associated with a @vport from the 11840 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11841 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11842 * list, removes each IOCB associated with the @vport off the list, set the 11843 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11844 * associated with the IOCB. 11845 **/ 11846 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11847 { 11848 LIST_HEAD(completions); 11849 struct lpfc_hba *phba = vport->phba; 11850 struct lpfc_iocbq *tmp_iocb, *piocb; 11851 11852 spin_lock_irq(&phba->hbalock); 11853 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11854 list) { 11855 11856 if (piocb->vport != vport) 11857 continue; 11858 11859 list_move_tail(&piocb->list, &completions); 11860 } 11861 spin_unlock_irq(&phba->hbalock); 11862 11863 /* Cancel all the IOCBs from the completions list */ 11864 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11865 IOERR_SLI_ABORTED); 11866 } 11867 11868 /** 11869 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11870 * @ndlp: pointer to a node-list data structure. 11871 * 11872 * This routine aborts all the IOCBs associated with an @ndlp from the 11873 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11874 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11875 * list, removes each IOCB associated with the @ndlp off the list, set the 11876 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11877 * associated with the IOCB. 
11878 **/ 11879 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11880 { 11881 LIST_HEAD(completions); 11882 struct lpfc_hba *phba = ndlp->phba; 11883 struct lpfc_iocbq *tmp_iocb, *piocb; 11884 struct lpfc_sli_ring *pring; 11885 11886 pring = lpfc_phba_elsring(phba); 11887 11888 if (unlikely(!pring)) 11889 return; 11890 11891 spin_lock_irq(&phba->hbalock); 11892 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11893 list) { 11894 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11895 11896 list_move_tail(&piocb->list, &completions); 11897 } 11898 } 11899 spin_unlock_irq(&phba->hbalock); 11900 11901 /* Cancel all the IOCBs from the completions list */ 11902 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11903 IOERR_SLI_ABORTED); 11904 } 11905 11906 /** 11907 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11908 * @phba: pointer to lpfc hba data structure. 11909 * 11910 * This routine aborts all the IOCBs currently on the driver internal 11911 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11912 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11913 * list, removes IOCBs off the list, set the status field to 11914 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11915 * the IOCB. 11916 **/ 11917 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11918 { 11919 LIST_HEAD(completions); 11920 11921 spin_lock_irq(&phba->hbalock); 11922 list_splice_init(&phba->fabric_iocb_list, &completions); 11923 spin_unlock_irq(&phba->hbalock); 11924 11925 /* Cancel all the IOCBs from the completions list */ 11926 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11927 IOERR_SLI_ABORTED); 11928 } 11929 11930 /** 11931 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11932 * @vport: pointer to lpfc vport data structure. 11933 * 11934 * This routine is invoked by the vport cleanup for deletions and the cleanup 11935 * for an ndlp on removal. 11936 **/ 11937 void 11938 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11939 { 11940 struct lpfc_hba *phba = vport->phba; 11941 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11942 struct lpfc_nodelist *ndlp = NULL; 11943 unsigned long iflag = 0; 11944 11945 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11946 list_for_each_entry_safe(sglq_entry, sglq_next, 11947 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11948 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11949 lpfc_nlp_put(sglq_entry->ndlp); 11950 ndlp = sglq_entry->ndlp; 11951 sglq_entry->ndlp = NULL; 11952 11953 /* If the xri on the abts_els_sgl list is for the Fport 11954 * node and the vport is unloading, the xri aborted wcqe 11955 * likely isn't coming back. Just release the sgl. 11956 */ 11957 if (test_bit(FC_UNLOADING, &vport->load_flag) && 11958 ndlp->nlp_DID == Fabric_DID) { 11959 list_del(&sglq_entry->list); 11960 sglq_entry->state = SGL_FREED; 11961 list_add_tail(&sglq_entry->list, 11962 &phba->sli4_hba.lpfc_els_sgl_list); 11963 } 11964 } 11965 } 11966 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11967 return; 11968 } 11969 11970 /** 11971 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11972 * @phba: pointer to lpfc hba data structure. 11973 * @axri: pointer to the els xri abort wcqe structure. 11974 * 11975 * This routine is invoked by the worker thread to process a SLI4 slow-path 11976 * ELS aborted xri. 

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
        struct lpfc_nodelist *ndlp = NULL;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
        list_for_each_entry_safe(sglq_entry, sglq_next,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
                if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
                        lpfc_nlp_put(sglq_entry->ndlp);
                        ndlp = sglq_entry->ndlp;
                        sglq_entry->ndlp = NULL;

                        /* If the xri on the abts_els_sgl list is for the Fport
                         * node and the vport is unloading, the xri aborted wcqe
                         * likely isn't coming back. Just release the sgl.
                         */
                        if (test_bit(FC_UNLOADING, &vport->load_flag) &&
                            ndlp->nlp_DID == Fabric_DID) {
                                list_del(&sglq_entry->list);
                                sglq_entry->state = SGL_FREED;
                                list_add_tail(&sglq_entry->list,
                                              &phba->sli4_hba.lpfc_els_sgl_list);
                        }
                }
        }
        spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
        return;
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
                          struct sli4_wcqe_xri_aborted *axri)
{
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        uint16_t lxri = 0;

        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
        unsigned long iflag = 0;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring;

        pring = lpfc_phba_elsring(phba);

        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
        list_for_each_entry_safe(sglq_entry, sglq_next,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
                if (sglq_entry->sli4_xritag == xri) {
                        list_del(&sglq_entry->list);
                        ndlp = sglq_entry->ndlp;
                        sglq_entry->ndlp = NULL;
                        list_add_tail(&sglq_entry->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        sglq_entry->state = SGL_FREED;
                        spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
                                               iflag);

                        if (ndlp) {
                                lpfc_set_rrq_active(phba, ndlp,
                                                    sglq_entry->sli4_lxritag,
                                                    rxid, 1);
                                lpfc_nlp_put(ndlp);
                        }

                        /* Check if TXQ queue needs to be serviced */
                        if (pring && !list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                        return;
                }
        }
        spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
        lxri = lpfc_sli4_xri_inrange(phba, xri);
        if (lxri == NO_XRI)
                return;

        spin_lock_irqsave(&phba->hbalock, iflag);
        sglq_entry = __lpfc_get_active_sglq(phba, lxri);
        if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                return;
        }
        sglq_entry->state = SGL_XRI_ABORTED;
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return;
}
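
/*
 * Flow summary for lpfc_sli4_els_xri_aborted() (descriptive note only):
 * when the XRI_ABORTED WCQE names an sglq still parked on
 * lpfc_abts_els_sgl_list, the sgl is returned to lpfc_els_sgl_list, the RRQ
 * timer is started for the exchange via lpfc_set_rrq_active(), the ndlp
 * reference held by the sgl is dropped, and the worker thread is woken if
 * the ELS txq has work pending.  If the xri instead maps only to an active
 * sglq, the entry is marked SGL_XRI_ABORTED so the normal completion path
 * releases it later.
 */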

/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
                           struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost;
        struct lpfc_hba *phba;
        unsigned long flags = 0;

        shost = lpfc_shost_from_vport(vport);
        phba = vport->phba;
        if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
                lpfc_printf_log(phba, KERN_INFO,
                                LOG_SLI, "3093 No rport recovery needed. "
                                "rport in state 0x%x\n", ndlp->nlp_state);
                return;
        }
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "3094 Start rport recovery on shost id 0x%x "
                        "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
                        "flag 0x%lx\n",
                        shost->host_no, ndlp->nlp_DID,
                        vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
                        ndlp->nlp_flag);
        /*
         * The rport is not responding. Remove the FCP-2 flag to prevent
         * an ADISC in the follow-up recovery code.
         */
        spin_lock_irqsave(&ndlp->lock, flags);
        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
        spin_unlock_irqrestore(&ndlp->lock, flags);
        set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
        lpfc_unreg_rpi(vport, ndlp);
}

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
{
        bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
}

static void
lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
{
        u32 i;

        if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
                return;

        for (i = min; i <= max; i++)
                set_bit(i, vport->vmid_priority_range);
}

static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
{
        set_bit(ctcl_vmid, vport->vmid_priority_range);
}

u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
{
        u32 i;

        i = find_first_bit(vport->vmid_priority_range,
                           LPFC_VMID_MAX_PRIORITY_RANGE);

        if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
                return 0;

        clear_bit(i, vport->vmid_priority_range);
        return i;
}
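
/*
 * The four helpers above manage the pool of CS_CTL priority values handed
 * out for VMID tagging.  A rough usage sketch (illustrative only):
 *
 *      u32 cs_ctl;
 *
 *      cs_ctl = lpfc_vmid_get_cs_ctl(vport);   - claim the lowest free value
 *      vmid->un.cs_ctl_vmid = cs_ctl;          - tag I/O for this VMID with it
 *      ...
 *      lpfc_vmid_put_cs_ctl(vport, cs_ctl);    - return it on deregistration
 *
 * The bitmap itself is (re)seeded from the QFPA response in
 * lpfc_cmpl_els_qfpa() below via lpfc_init_cs_ctl_bitmap() and
 * lpfc_vmid_set_cs_ctl_range().
 */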

#define MAX_PRIORITY_DESC	255

static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                   struct lpfc_iocbq *rspiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct priority_range_desc *desc;
        struct lpfc_dmabuf *prsp = NULL;
        struct lpfc_vmid_priority_range *vmid_range = NULL;
        u32 *data;
        struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
        u8 *pcmd, max_desc;
        u32 len, i;
        struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

        prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
        if (!prsp)
                goto out;

        pcmd = prsp->virt;
        data = (u32 *)pcmd;
        if (data[0] == ELS_CMD_LS_RJT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
                                 "3277 QFPA LS_RJT x%x x%x\n",
                                 data[0], data[1]);
                goto out;
        }
        if (ulp_status) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
                                 "6529 QFPA failed with status x%x x%x\n",
                                 ulp_status, ulp_word4);
                goto out;
        }

        if (!vport->qfpa_res) {
                max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
                vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
                                          GFP_KERNEL);
                if (!vport->qfpa_res)
                        goto out;
        }

        len = *((u32 *)(pcmd + 4));
        len = be32_to_cpu(len);
        memcpy(vport->qfpa_res, pcmd, len + 8);
        len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

        desc = (struct priority_range_desc *)(pcmd + 8);
        vmid_range = vport->vmid_priority.vmid_range;
        if (!vmid_range) {
                vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
                                     GFP_KERNEL);
                if (!vmid_range) {
                        kfree(vport->qfpa_res);
                        goto out;
                }
                vport->vmid_priority.vmid_range = vmid_range;
        }
        vport->vmid_priority.num_descriptors = len;

        for (i = 0; i < len; i++, vmid_range++, desc++) {
                lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
                                 "6539 vmid values low=%d, high=%d, qos=%d, "
                                 "local ve id=%d\n", desc->lo_range,
                                 desc->hi_range, desc->qos_priority,
                                 desc->local_ve_id);

                vmid_range->low = desc->lo_range << 1;
                if (desc->local_ve_id == QFPA_ODD_ONLY)
                        vmid_range->low++;
                if (desc->qos_priority)
                        vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
                vmid_range->qos = desc->qos_priority;

                vmid_range->high = desc->hi_range << 1;
                if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
                    (desc->local_ve_id == QFPA_EVEN_ODD))
                        vmid_range->high++;
        }
        lpfc_init_cs_ctl_bitmap(vport);
        for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
                lpfc_vmid_set_cs_ctl_range(vport,
                                vport->vmid_priority.vmid_range[i].low,
                                vport->vmid_priority.vmid_range[i].high);
        }

        vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
 out:
        lpfc_els_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *elsiocb;
        u8 *pcmd;
        int ret;

        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
                return -ENXIO;

        elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
                                     ndlp->nlp_DID, ELS_CMD_QFPA);
        if (!elsiocb)
                return -ENOMEM;

        pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

        *((u32 *)(pcmd)) = ELS_CMD_QFPA;
        pcmd += 4;

        elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

        elsiocb->ndlp = lpfc_nlp_get(ndlp);
        if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(vport->phba, elsiocb);
                return -ENXIO;
        }

        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
        if (ret != IOCB_SUCCESS) {
                lpfc_els_free_iocb(phba, elsiocb);
                lpfc_nlp_put(ndlp);
                return -EIO;
        }
        vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
        return 0;
}
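
/*
 * QFPA only makes sense when CS_CTL based VMID tagging is enabled, so the
 * command is normally gated on the priority-tagging configuration.  A rough
 * sketch of the trigger (illustrative only; the actual call sites live in
 * the discovery path):
 *
 *      if (phba->cfg_vmid_priority_tagging &&
 *          !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL))
 *              lpfc_issue_els_qfpa(vport);
 *
 * The QFPA response is parsed in lpfc_cmpl_els_qfpa() above, which caches
 * the priority descriptors in vport->qfpa_res and seeds the CS_CTL bitmap.
 */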

int
lpfc_vmid_uvem(struct lpfc_vport *vport,
               struct lpfc_vmid *vmid, bool instantiated)
{
        struct lpfc_vem_id_desc *vem_id_desc;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *elsiocb;
        struct instantiated_ve_desc *inst_desc;
        struct lpfc_vmid_context *vmid_context;
        u8 *pcmd;
        u32 *len;
        int ret = 0;

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
                return -ENXIO;

        vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
        if (!vmid_context)
                return -ENOMEM;
        elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
                                     ndlp, Fabric_DID, ELS_CMD_UVEM);
        if (!elsiocb)
                goto out;

        lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
                         "3427 Host vmid %s %d\n",
                         vmid->host_vmid, instantiated);
        vmid_context->vmp = vmid;
        vmid_context->nlp = ndlp;
        vmid_context->instantiated = instantiated;
        elsiocb->vmid_tag.vmid_context = vmid_context;
        pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

        if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
                        sizeof(vport->lpfc_vmid_host_uuid)))
                memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
                       sizeof(vport->lpfc_vmid_host_uuid));

        *((u32 *)(pcmd)) = ELS_CMD_UVEM;
        len = (u32 *)(pcmd + 4);
        *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

        vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
        vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
        vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
        memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
               sizeof(vem_id_desc->vem_id));

        inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
        inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
        inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
        memcpy(inst_desc->global_vem_id, vmid->host_vmid,
               sizeof(inst_desc->global_vem_id));

        bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
        bf_set(lpfc_instantiated_local_id, inst_desc,
               vmid->un.cs_ctl_vmid);
        if (instantiated) {
                inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
        } else {
                inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
                lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
        }
        inst_desc->word6 = cpu_to_be32(inst_desc->word6);

        elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

        elsiocb->ndlp = lpfc_nlp_get(ndlp);
        if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(vport->phba, elsiocb);
                goto out;
        }

        ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
        if (ret != IOCB_SUCCESS) {
                lpfc_els_free_iocb(vport->phba, elsiocb);
                lpfc_nlp_put(ndlp);
                goto out;
        }

        return 0;
 out:
        kfree(vmid_context);
        return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
                   struct lpfc_iocbq *rspiocb)
{
        struct lpfc_vport *vport = icmdiocb->vport;
        struct lpfc_dmabuf *prsp = NULL;
        struct lpfc_vmid_context *vmid_context =
            icmdiocb->vmid_tag.vmid_context;
        struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
        u8 *pcmd;
        u32 *data;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
        struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
        struct lpfc_vmid *vmid;

        vmid = vmid_context->vmp;
        if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
                ndlp = NULL;

        prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
        if (!prsp)
                goto out;
        pcmd = prsp->virt;
        data = (u32 *)pcmd;
        if (data[0] == ELS_CMD_LS_RJT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
                                 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
                goto out;
        }
        if (ulp_status) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
                                 "4533 UVEM error status %x: %x\n",
                                 ulp_status, ulp_word4);
                goto out;
        }
        spin_lock(&phba->hbalock);
        /* Set IN USE flag */
        vport->vmid_flag |= LPFC_VMID_IN_USE;
        phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
        spin_unlock(&phba->hbalock);

        if (vmid_context->instantiated) {
                write_lock(&vport->vmid_lock);
                vmid->flag |= LPFC_VMID_REGISTERED;
                vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
                write_unlock(&vport->vmid_lock);
        }

 out:
        kfree(vmid_context);
        lpfc_els_free_iocb(phba, icmdiocb);
        lpfc_nlp_put(ndlp);
}
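
/*
 * UVEM lifecycle sketch (illustrative only, not driver code): once a CS_CTL
 * value has been claimed for a VMID entry, it is instantiated on the fabric
 * and later deinstantiated when the entry is retired:
 *
 *      vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
 *      if (!lpfc_vmid_uvem(vport, vmid, true))
 *              vmid->flag |= LPFC_VMID_REQ_REGISTER;
 *      ...
 *      lpfc_vmid_uvem(vport, vmid, false);
 *
 * lpfc_cmpl_els_uvem() turns LPFC_VMID_REQ_REGISTER into LPFC_VMID_REGISTERED
 * once the fabric accepts the instantiation, and the deinstantiate path
 * returns the CS_CTL value through lpfc_vmid_put_cs_ctl().
 */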